GNU Linux-libre 5.10.215-gnu1
drivers/android/binder.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* binder.c
3  *
4  * Android IPC Subsystem
5  *
6  * Copyright (C) 2007-2008 Google, Inc.
7  */
8
9 /*
10  * Locking overview
11  *
12  * There are 3 main spinlocks which must be acquired in the
13  * order shown:
14  *
15  * 1) proc->outer_lock : protects binder_ref
16  *    binder_proc_lock() and binder_proc_unlock() are
17  *    used to acq/rel.
18  * 2) node->lock : protects most fields of binder_node.
19  *    binder_node_lock() and binder_node_unlock() are
20  *    used to acq/rel
21  * 3) proc->inner_lock : protects the thread and node lists
22  *    (proc->threads, proc->waiting_threads, proc->nodes)
23  *    and all todo lists associated with the binder_proc
24  *    (proc->todo, thread->todo, proc->delivered_death and
25  *    node->async_todo), as well as thread->transaction_stack
26  *    binder_inner_proc_lock() and binder_inner_proc_unlock()
27  *    are used to acq/rel
28  *
29  * Any lock under procA must never be nested under any lock at the same
30  * level or below on procB.
31  *
32  * Functions that require a lock to be held on entry indicate the
33  * required lock in the suffix of the function name:
34  *
35  * foo_olocked() : requires node->outer_lock
36  * foo_nlocked() : requires node->lock
37  * foo_ilocked() : requires proc->inner_lock
38  * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39  * foo_nilocked(): requires node->lock and proc->inner_lock
40  * ...
41  */
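
/*
 * For illustration only (not driver code): with the locking helpers
 * defined later in this file, and proc/node standing for any
 * binder_proc/binder_node pair, the documented order corresponds to
 *
 *	binder_proc_lock(proc);		1) proc->outer_lock
 *	binder_node_lock(node);		2) node->lock
 *	binder_inner_proc_lock(proc);	3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 *
 * taken in that order and released in reverse, and never interleaved
 * with the locks of a second binder_proc at the same or lower level.
 */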
42
43 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44
45 #include <linux/fdtable.h>
46 #include <linux/file.h>
47 #include <linux/freezer.h>
48 #include <linux/fs.h>
49 #include <linux/list.h>
50 #include <linux/miscdevice.h>
51 #include <linux/module.h>
52 #include <linux/mutex.h>
53 #include <linux/nsproxy.h>
54 #include <linux/poll.h>
55 #include <linux/debugfs.h>
56 #include <linux/rbtree.h>
57 #include <linux/sched/signal.h>
58 #include <linux/sched/mm.h>
59 #include <linux/seq_file.h>
60 #include <linux/string.h>
61 #include <linux/uaccess.h>
62 #include <linux/pid_namespace.h>
63 #include <linux/security.h>
64 #include <linux/spinlock.h>
65 #include <linux/ratelimit.h>
66 #include <linux/syscalls.h>
67 #include <linux/task_work.h>
68 #include <linux/sizes.h>
69
70 #include <uapi/linux/android/binder.h>
71 #include <uapi/linux/android/binderfs.h>
72
73 #include <asm/cacheflush.h>
74
75 #include "binder_alloc.h"
76 #include "binder_internal.h"
77 #include "binder_trace.h"
78
79 static HLIST_HEAD(binder_deferred_list);
80 static DEFINE_MUTEX(binder_deferred_lock);
81
82 static HLIST_HEAD(binder_devices);
83 static HLIST_HEAD(binder_procs);
84 static DEFINE_MUTEX(binder_procs_lock);
85
86 static HLIST_HEAD(binder_dead_nodes);
87 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
88
89 static struct dentry *binder_debugfs_dir_entry_root;
90 static struct dentry *binder_debugfs_dir_entry_proc;
91 static atomic_t binder_last_id;
92
93 static int proc_show(struct seq_file *m, void *unused);
94 DEFINE_SHOW_ATTRIBUTE(proc);
95
96 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
97
98 enum {
99         BINDER_DEBUG_USER_ERROR             = 1U << 0,
100         BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
101         BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
102         BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
103         BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
104         BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
105         BINDER_DEBUG_READ_WRITE             = 1U << 6,
106         BINDER_DEBUG_USER_REFS              = 1U << 7,
107         BINDER_DEBUG_THREADS                = 1U << 8,
108         BINDER_DEBUG_TRANSACTION            = 1U << 9,
109         BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
110         BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
111         BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
112         BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
113         BINDER_DEBUG_SPINLOCKS              = 1U << 14,
114 };
115 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
116         BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
117 module_param_named(debug_mask, binder_debug_mask, uint, 0644);
118
119 char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
120 module_param_named(devices, binder_devices_param, charp, 0444);
121
122 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
123 static int binder_stop_on_user_error;
124
125 static int binder_set_stop_on_user_error(const char *val,
126                                          const struct kernel_param *kp)
127 {
128         int ret;
129
130         ret = param_set_int(val, kp);
131         if (binder_stop_on_user_error < 2)
132                 wake_up(&binder_user_error_wait);
133         return ret;
134 }
135 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
136         param_get_int, &binder_stop_on_user_error, 0644);
137
138 #define binder_debug(mask, x...) \
139         do { \
140                 if (binder_debug_mask & mask) \
141                         pr_info_ratelimited(x); \
142         } while (0)
143
144 #define binder_user_error(x...) \
145         do { \
146                 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
147                         pr_info_ratelimited(x); \
148                 if (binder_stop_on_user_error) \
149                         binder_stop_on_user_error = 2; \
150         } while (0)
151
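/*
 * A minimal usage sketch for the macros above; the mask is one of the
 * BINDER_DEBUG_* flags and the remaining arguments are ordinary printk
 * format/arguments (the variable names here are illustrative):
 *
 *	binder_debug(BINDER_DEBUG_READ_WRITE,
 *		     "%d:%d wrote %lld of %lld bytes\n",
 *		     proc->pid, thread->pid, consumed, size);
 *
 *	binder_user_error("%d:%d got transaction with invalid handle\n",
 *			  proc->pid, thread->pid);
 *
 * binder_user_error() additionally latches binder_stop_on_user_error
 * to 2 when the stop_on_user_error module parameter is enabled.
 */
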
152 #define to_flat_binder_object(hdr) \
153         container_of(hdr, struct flat_binder_object, hdr)
154
155 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
156
157 #define to_binder_buffer_object(hdr) \
158         container_of(hdr, struct binder_buffer_object, hdr)
159
160 #define to_binder_fd_array_object(hdr) \
161         container_of(hdr, struct binder_fd_array_object, hdr)
162
163 enum binder_stat_types {
164         BINDER_STAT_PROC,
165         BINDER_STAT_THREAD,
166         BINDER_STAT_NODE,
167         BINDER_STAT_REF,
168         BINDER_STAT_DEATH,
169         BINDER_STAT_TRANSACTION,
170         BINDER_STAT_TRANSACTION_COMPLETE,
171         BINDER_STAT_COUNT
172 };
173
174 struct binder_stats {
175         atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
176         atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
177         atomic_t obj_created[BINDER_STAT_COUNT];
178         atomic_t obj_deleted[BINDER_STAT_COUNT];
179 };
180
181 static struct binder_stats binder_stats;
182
183 static inline void binder_stats_deleted(enum binder_stat_types type)
184 {
185         atomic_inc(&binder_stats.obj_deleted[type]);
186 }
187
188 static inline void binder_stats_created(enum binder_stat_types type)
189 {
190         atomic_inc(&binder_stats.obj_created[type]);
191 }
192
193 struct binder_transaction_log binder_transaction_log;
194 struct binder_transaction_log binder_transaction_log_failed;
195
196 static struct binder_transaction_log_entry *binder_transaction_log_add(
197         struct binder_transaction_log *log)
198 {
199         struct binder_transaction_log_entry *e;
200         unsigned int cur = atomic_inc_return(&log->cur);
201
202         if (cur >= ARRAY_SIZE(log->entry))
203                 log->full = true;
204         e = &log->entry[cur % ARRAY_SIZE(log->entry)];
205         WRITE_ONCE(e->debug_id_done, 0);
206         /*
207          * write-barrier to synchronize access to e->debug_id_done.
208          * We make sure the initialized 0 value is seen before
209          * the other fields are zeroed by the memset() below.
210          */
211         smp_wmb();
212         memset(e, 0, sizeof(*e));
213         return e;
214 }
215
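/*
 * A sketch of the reader this write barrier pairs with (the debugfs
 * log printer later in this file); an entry is treated as complete
 * only if debug_id_done is unchanged after the fields are printed:
 *
 *	int debug_id = READ_ONCE(e->debug_id_done);
 *
 *	smp_rmb();
 *	...print the entry fields...
 *	if (debug_id && debug_id == READ_ONCE(e->debug_id_done))
 *		...entry was complete...
 */
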
216 /**
217  * struct binder_work - work enqueued on a worklist
218  * @entry:             node enqueued on list
219  * @type:              type of work to be performed
220  *
221  * There are separate work lists for proc, thread, and node (async).
222  */
223 struct binder_work {
224         struct list_head entry;
225
226         enum binder_work_type {
227                 BINDER_WORK_TRANSACTION = 1,
228                 BINDER_WORK_TRANSACTION_COMPLETE,
229                 BINDER_WORK_RETURN_ERROR,
230                 BINDER_WORK_NODE,
231                 BINDER_WORK_DEAD_BINDER,
232                 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
233                 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
234         } type;
235 };
236
237 struct binder_error {
238         struct binder_work work;
239         uint32_t cmd;
240 };
241
242 /**
243  * struct binder_node - binder node bookkeeping
244  * @debug_id:             unique ID for debugging
245  *                        (invariant after initialized)
246  * @lock:                 lock for node fields
247  * @work:                 worklist element for node work
248  *                        (protected by @proc->inner_lock)
249  * @rb_node:              element for proc->nodes tree
250  *                        (protected by @proc->inner_lock)
251  * @dead_node:            element for binder_dead_nodes list
252  *                        (protected by binder_dead_nodes_lock)
253  * @proc:                 binder_proc that owns this node
254  *                        (invariant after initialized)
255  * @refs:                 list of references on this node
256  *                        (protected by @lock)
257  * @internal_strong_refs: used to take strong references when
258  *                        initiating a transaction
259  *                        (protected by @proc->inner_lock if @proc
260  *                        and by @lock)
261  * @local_weak_refs:      weak user refs from local process
262  *                        (protected by @proc->inner_lock if @proc
263  *                        and by @lock)
264  * @local_strong_refs:    strong user refs from local process
265  *                        (protected by @proc->inner_lock if @proc
266  *                        and by @lock)
267  * @tmp_refs:             temporary kernel refs
268  *                        (protected by @proc->inner_lock while @proc
269  *                        is valid, and by binder_dead_nodes_lock
270  *                        if @proc is NULL. During inc/dec and node release
271  *                        it is also protected by @lock to provide safety
272  *                        as the node dies and @proc becomes NULL)
273  * @ptr:                  userspace pointer for node
274  *                        (invariant, no lock needed)
275  * @cookie:               userspace cookie for node
276  *                        (invariant, no lock needed)
277  * @has_strong_ref:       userspace notified of strong ref
278  *                        (protected by @proc->inner_lock if @proc
279  *                        and by @lock)
280  * @pending_strong_ref:   userspace has acked notification of strong ref
281  *                        (protected by @proc->inner_lock if @proc
282  *                        and by @lock)
283  * @has_weak_ref:         userspace notified of weak ref
284  *                        (protected by @proc->inner_lock if @proc
285  *                        and by @lock)
286  * @pending_weak_ref:     userspace has acked notification of weak ref
287  *                        (protected by @proc->inner_lock if @proc
288  *                        and by @lock)
289  * @has_async_transaction: async transaction to node in progress
290  *                        (protected by @lock)
291  * @accept_fds:           file descriptor operations supported for node
292  *                        (invariant after initialized)
293  * @min_priority:         minimum scheduling priority
294  *                        (invariant after initialized)
295  * @txn_security_ctx:     require sender's security context
296  *                        (invariant after initialized)
297  * @async_todo:           list of async work items
298  *                        (protected by @proc->inner_lock)
299  *
300  * Bookkeeping structure for binder nodes.
301  */
302 struct binder_node {
303         int debug_id;
304         spinlock_t lock;
305         struct binder_work work;
306         union {
307                 struct rb_node rb_node;
308                 struct hlist_node dead_node;
309         };
310         struct binder_proc *proc;
311         struct hlist_head refs;
312         int internal_strong_refs;
313         int local_weak_refs;
314         int local_strong_refs;
315         int tmp_refs;
316         binder_uintptr_t ptr;
317         binder_uintptr_t cookie;
318         struct {
319                 /*
320                  * bitfield elements protected by
321                  * proc inner_lock
322                  */
323                 u8 has_strong_ref:1;
324                 u8 pending_strong_ref:1;
325                 u8 has_weak_ref:1;
326                 u8 pending_weak_ref:1;
327         };
328         struct {
329                 /*
330                  * invariant after initialization
331                  */
332                 u8 accept_fds:1;
333                 u8 txn_security_ctx:1;
334                 u8 min_priority;
335         };
336         bool has_async_transaction;
337         struct list_head async_todo;
338 };
339
340 struct binder_ref_death {
341         /**
342          * @work: worklist element for death notifications
343          *        (protected by inner_lock of the proc that
344          *        this ref belongs to)
345          */
346         struct binder_work work;
347         binder_uintptr_t cookie;
348 };
349
350 /**
351  * struct binder_ref_data - binder_ref counts and id
352  * @debug_id:        unique ID for the ref
353  * @desc:            unique userspace handle for ref
354  * @strong:          strong ref count (debugging only if not locked)
355  * @weak:            weak ref count (debugging only if not locked)
356  *
357  * Structure to hold ref count and ref id information. Since
358  * the actual ref can only be accessed with a lock, this structure
359  * is used to return information about the ref to callers of
360  * ref inc/dec functions.
361  */
362 struct binder_ref_data {
363         int debug_id;
364         uint32_t desc;
365         int strong;
366         int weak;
367 };
368
369 /**
370  * struct binder_ref - struct to track references on nodes
371  * @data:        binder_ref_data containing id, handle, and current refcounts
372  * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
373  * @rb_node_node: node for lookup by @node in proc's rb_tree
374  * @node_entry:  list entry for node->refs list in target node
375  *               (protected by @node->lock)
376  * @proc:        binder_proc containing ref
377  * @node:        binder_node of target node. When cleaning up a
378  *               ref for deletion in binder_cleanup_ref, a non-NULL
379  *               @node indicates the node must be freed
380  * @death:       pointer to death notification (ref_death) if requested
381  *               (protected by @node->lock)
382  *
383  * Structure to track references from procA to target node (on procB). This
384  * structure is unsafe to access without holding @proc->outer_lock.
385  */
386 struct binder_ref {
387         /* Lookups needed: */
388         /*   node + proc => ref (transaction) */
389         /*   desc + proc => ref (transaction, inc/dec ref) */
390         /*   node => refs + procs (proc exit) */
391         struct binder_ref_data data;
392         struct rb_node rb_node_desc;
393         struct rb_node rb_node_node;
394         struct hlist_node node_entry;
395         struct binder_proc *proc;
396         struct binder_node *node;
397         struct binder_ref_death *death;
398 };
399
400 enum binder_deferred_state {
401         BINDER_DEFERRED_FLUSH        = 0x01,
402         BINDER_DEFERRED_RELEASE      = 0x02,
403 };
404
405 /**
406  * struct binder_proc - binder process bookkeeping
407  * @proc_node:            element for binder_procs list
408  * @threads:              rbtree of binder_threads in this proc
409  *                        (protected by @inner_lock)
410  * @nodes:                rbtree of binder nodes associated with
411  *                        this proc ordered by node->ptr
412  *                        (protected by @inner_lock)
413  * @refs_by_desc:         rbtree of refs ordered by ref->desc
414  *                        (protected by @outer_lock)
415  * @refs_by_node:         rbtree of refs ordered by ref->node
416  *                        (protected by @outer_lock)
417  * @waiting_threads:      threads currently waiting for proc work
418  *                        (protected by @inner_lock)
419  * @pid:                  PID of group_leader of process
420  *                        (invariant after initialized)
421  * @tsk:                  task_struct for group_leader of process
422  *                        (invariant after initialized)
423  * @cred:                 struct cred associated with the `struct file`
424  *                        in binder_open()
425  *                        (invariant after initialized)
426  * @deferred_work_node:   element for binder_deferred_list
427  *                        (protected by binder_deferred_lock)
428  * @deferred_work:        bitmap of deferred work to perform
429  *                        (protected by binder_deferred_lock)
430  * @is_dead:              process is dead and awaiting free
431  *                        when outstanding transactions are cleaned up
432  *                        (protected by @inner_lock)
433  * @todo:                 list of work for this process
434  *                        (protected by @inner_lock)
435  * @stats:                per-process binder statistics
436  *                        (atomics, no lock needed)
437  * @delivered_death:      list of delivered death notifications
438  *                        (protected by @inner_lock)
439  * @max_threads:          cap on number of binder threads
440  *                        (protected by @inner_lock)
441  * @requested_threads:    number of binder threads requested but not
442  *                        yet started. In current implementation, can
443  *                        only be 0 or 1.
444  *                        (protected by @inner_lock)
445  * @requested_threads_started: number of binder threads started
446  *                        (protected by @inner_lock)
447  * @tmp_ref:              temporary reference to indicate proc is in use
448  *                        (protected by @inner_lock)
449  * @default_priority:     default scheduler priority
450  *                        (invariant after initialized)
451  * @debugfs_entry:        debugfs node
452  * @alloc:                binder allocator bookkeeping
453  * @context:              binder_context for this proc
454  *                        (invariant after initialized)
455  * @inner_lock:           can nest under outer_lock and/or node lock
456  * @outer_lock:           no nesting under inner or node lock
457  *                        Lock order: 1) outer, 2) node, 3) inner
458  * @binderfs_entry:       process-specific binderfs log file
459  *
460  * Bookkeeping structure for binder processes
461  */
462 struct binder_proc {
463         struct hlist_node proc_node;
464         struct rb_root threads;
465         struct rb_root nodes;
466         struct rb_root refs_by_desc;
467         struct rb_root refs_by_node;
468         struct list_head waiting_threads;
469         int pid;
470         struct task_struct *tsk;
471         const struct cred *cred;
472         struct hlist_node deferred_work_node;
473         int deferred_work;
474         bool is_dead;
475
476         struct list_head todo;
477         struct binder_stats stats;
478         struct list_head delivered_death;
479         int max_threads;
480         int requested_threads;
481         int requested_threads_started;
482         int tmp_ref;
483         long default_priority;
484         struct dentry *debugfs_entry;
485         struct binder_alloc alloc;
486         struct binder_context *context;
487         spinlock_t inner_lock;
488         spinlock_t outer_lock;
489         struct dentry *binderfs_entry;
490 };
491
492 enum {
493         BINDER_LOOPER_STATE_REGISTERED  = 0x01,
494         BINDER_LOOPER_STATE_ENTERED     = 0x02,
495         BINDER_LOOPER_STATE_EXITED      = 0x04,
496         BINDER_LOOPER_STATE_INVALID     = 0x08,
497         BINDER_LOOPER_STATE_WAITING     = 0x10,
498         BINDER_LOOPER_STATE_POLL        = 0x20,
499 };
500
501 /**
502  * struct binder_thread - binder thread bookkeeping
503  * @proc:                 binder process for this thread
504  *                        (invariant after initialization)
505  * @rb_node:              element for proc->threads rbtree
506  *                        (protected by @proc->inner_lock)
507  * @waiting_thread_node:  element for @proc->waiting_threads list
508  *                        (protected by @proc->inner_lock)
509  * @pid:                  PID for this thread
510  *                        (invariant after initialization)
511  * @looper:               bitmap of looping state
512  *                        (only accessed by this thread)
513  * @looper_need_return:   looping thread needs to exit driver
514  *                        (no lock needed)
515  * @transaction_stack:    stack of in-progress transactions for this thread
516  *                        (protected by @proc->inner_lock)
517  * @todo:                 list of work to do for this thread
518  *                        (protected by @proc->inner_lock)
519  * @process_todo:         whether work in @todo should be processed
520  *                        (protected by @proc->inner_lock)
521  * @return_error:         transaction errors reported by this thread
522  *                        (only accessed by this thread)
523  * @reply_error:          transaction errors reported by target thread
524  *                        (protected by @proc->inner_lock)
525  * @wait:                 wait queue for thread work
526  * @stats:                per-thread statistics
527  *                        (atomics, no lock needed)
528  * @tmp_ref:              temporary reference to indicate thread is in use
529  *                        (atomic since @proc->inner_lock cannot
530  *                        always be acquired)
531  * @is_dead:              thread is dead and awaiting free
532  *                        when outstanding transactions are cleaned up
533  *                        (protected by @proc->inner_lock)
534  *
535  * Bookkeeping structure for binder threads.
536  */
537 struct binder_thread {
538         struct binder_proc *proc;
539         struct rb_node rb_node;
540         struct list_head waiting_thread_node;
541         int pid;
542         int looper;              /* only modified by this thread */
543         bool looper_need_return; /* can be written by other thread */
544         struct binder_transaction *transaction_stack;
545         struct list_head todo;
546         bool process_todo;
547         struct binder_error return_error;
548         struct binder_error reply_error;
549         wait_queue_head_t wait;
550         struct binder_stats stats;
551         atomic_t tmp_ref;
552         bool is_dead;
553 };
554
555 /**
556  * struct binder_txn_fd_fixup - transaction fd fixup list element
557  * @fixup_entry:          list entry
558  * @file:                 struct file to be associated with new fd
559  * @offset:               offset in buffer data to this fixup
560  *
561  * List element for fd fixups in a transaction. Since file
562  * descriptors need to be allocated in the context of the
563  * target process, we pass each fd to be processed in this
564  * struct.
565  */
566 struct binder_txn_fd_fixup {
567         struct list_head fixup_entry;
568         struct file *file;
569         size_t offset;
570 };
571
572 struct binder_transaction {
573         int debug_id;
574         struct binder_work work;
575         struct binder_thread *from;
576         struct binder_transaction *from_parent;
577         struct binder_proc *to_proc;
578         struct binder_thread *to_thread;
579         struct binder_transaction *to_parent;
580         unsigned need_reply:1;
581         /* unsigned is_dead:1; */       /* not used at the moment */
582
583         struct binder_buffer *buffer;
584         unsigned int    code;
585         unsigned int    flags;
586         long    priority;
587         long    saved_priority;
588         kuid_t  sender_euid;
589         struct list_head fd_fixups;
590         binder_uintptr_t security_ctx;
591         /**
592          * @lock:  protects @from, @to_proc, and @to_thread
593          *
594          * @from, @to_proc, and @to_thread can be set to NULL
595          * during thread teardown
596          */
597         spinlock_t lock;
598 };
599
600 /**
601  * struct binder_object - union of flat binder object types
602  * @hdr:   generic object header
603  * @fbo:   binder object (nodes and refs)
604  * @fdo:   file descriptor object
605  * @bbo:   binder buffer pointer
606  * @fdao:  file descriptor array
607  *
608  * Used for type-independent object copies
609  */
610 struct binder_object {
611         union {
612                 struct binder_object_header hdr;
613                 struct flat_binder_object fbo;
614                 struct binder_fd_object fdo;
615                 struct binder_buffer_object bbo;
616                 struct binder_fd_array_object fdao;
617         };
618 };
619
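/*
 * A minimal usage sketch: a flat object is copied out of the transaction
 * buffer into a struct binder_object and then dispatched on the shared
 * header, using the BINDER_TYPE_* constants from
 * <uapi/linux/android/binder.h> (the copy step is elided here):
 *
 *	struct binder_object object;
 *
 *	switch (object.hdr.type) {
 *	case BINDER_TYPE_BINDER:
 *	case BINDER_TYPE_WEAK_BINDER:
 *	case BINDER_TYPE_HANDLE:
 *	case BINDER_TYPE_WEAK_HANDLE:
 *		...use object.fbo...
 *		break;
 *	case BINDER_TYPE_FD:
 *		...use object.fdo...
 *		break;
 *	case BINDER_TYPE_PTR:
 *		...use object.bbo...
 *		break;
 *	case BINDER_TYPE_FDA:
 *		...use object.fdao...
 *		break;
 *	}
 */
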
620 /**
621  * binder_proc_lock() - Acquire outer lock for given binder_proc
622  * @proc:         struct binder_proc to acquire
623  *
624  * Acquires proc->outer_lock. Used to protect binder_ref
625  * structures associated with the given proc.
626  */
627 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
628 static void
629 _binder_proc_lock(struct binder_proc *proc, int line)
630         __acquires(&proc->outer_lock)
631 {
632         binder_debug(BINDER_DEBUG_SPINLOCKS,
633                      "%s: line=%d\n", __func__, line);
634         spin_lock(&proc->outer_lock);
635 }
636
637 /**
638  * binder_proc_unlock() - Release outer lock for given binder_proc
639  * @proc:         struct binder_proc to release
640  *
641  * Release lock acquired via binder_proc_lock()
642  */
643 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
644 static void
645 _binder_proc_unlock(struct binder_proc *proc, int line)
646         __releases(&proc->outer_lock)
647 {
648         binder_debug(BINDER_DEBUG_SPINLOCKS,
649                      "%s: line=%d\n", __func__, line);
650         spin_unlock(&proc->outer_lock);
651 }
652
653 /**
654  * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
655  * @proc:         struct binder_proc to acquire
656  *
657  * Acquires proc->inner_lock. Used to protect todo lists
658  */
659 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
660 static void
661 _binder_inner_proc_lock(struct binder_proc *proc, int line)
662         __acquires(&proc->inner_lock)
663 {
664         binder_debug(BINDER_DEBUG_SPINLOCKS,
665                      "%s: line=%d\n", __func__, line);
666         spin_lock(&proc->inner_lock);
667 }
668
669 /**
670  * binder_inner_proc_unlock() - Release inner lock for given binder_proc
671  * @proc:         struct binder_proc to release
672  *
673  * Release lock acquired via binder_inner_proc_lock()
674  */
675 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
676 static void
677 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
678         __releases(&proc->inner_lock)
679 {
680         binder_debug(BINDER_DEBUG_SPINLOCKS,
681                      "%s: line=%d\n", __func__, line);
682         spin_unlock(&proc->inner_lock);
683 }
684
685 /**
686  * binder_node_lock() - Acquire spinlock for given binder_node
687  * @node:         struct binder_node to acquire
688  *
689  * Acquires node->lock. Used to protect binder_node fields
690  */
691 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
692 static void
693 _binder_node_lock(struct binder_node *node, int line)
694         __acquires(&node->lock)
695 {
696         binder_debug(BINDER_DEBUG_SPINLOCKS,
697                      "%s: line=%d\n", __func__, line);
698         spin_lock(&node->lock);
699 }
700
701 /**
702  * binder_node_unlock() - Release spinlock for given binder_node
703  * @node:         struct binder_node to release
704  *
705  * Release lock acquired via binder_node_lock()
706  */
707 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
708 static void
709 _binder_node_unlock(struct binder_node *node, int line)
710         __releases(&node->lock)
711 {
712         binder_debug(BINDER_DEBUG_SPINLOCKS,
713                      "%s: line=%d\n", __func__, line);
714         spin_unlock(&node->lock);
715 }
716
717 /**
718  * binder_node_inner_lock() - Acquire node and inner locks
719  * @node:         struct binder_node to acquire
720  *
721  * Acquires node->lock. If node->proc is set, also acquires
722  * proc->inner_lock. Used to protect binder_node fields
723  */
724 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
725 static void
726 _binder_node_inner_lock(struct binder_node *node, int line)
727         __acquires(&node->lock) __acquires(&node->proc->inner_lock)
728 {
729         binder_debug(BINDER_DEBUG_SPINLOCKS,
730                      "%s: line=%d\n", __func__, line);
731         spin_lock(&node->lock);
732         if (node->proc)
733                 binder_inner_proc_lock(node->proc);
734         else
735                 /* annotation for sparse */
736                 __acquire(&node->proc->inner_lock);
737 }
738
739 /**
740  * binder_node_inner_unlock() - Release node and inner locks
741  * @node:         struct binder_node to release
742  *
743  * Release locks acquired via binder_node_inner_lock()
744  */
745 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
746 static void
747 _binder_node_inner_unlock(struct binder_node *node, int line)
748         __releases(&node->lock) __releases(&node->proc->inner_lock)
749 {
750         struct binder_proc *proc = node->proc;
751
752         binder_debug(BINDER_DEBUG_SPINLOCKS,
753                      "%s: line=%d\n", __func__, line);
754         if (proc)
755                 binder_inner_proc_unlock(proc);
756         else
757                 /* annotation for sparse */
758                 __release(&node->proc->inner_lock);
759         spin_unlock(&node->lock);
760 }
761
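/*
 * A minimal sketch of how the paired helpers above are typically used
 * (binder_inc_node() further below is a real caller):
 *
 *	binder_node_inner_lock(node);
 *	...update fields protected by node->lock and, while node->proc
 *	   is still set, by proc->inner_lock...
 *	binder_node_inner_unlock(node);
 */
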
762 static bool binder_worklist_empty_ilocked(struct list_head *list)
763 {
764         return list_empty(list);
765 }
766
767 /**
768  * binder_worklist_empty() - Check if no items on the work list
769  * @proc:       binder_proc associated with list
770  * @list:       list to check
771  *
772  * Return: true if there are no items on list, else false
773  */
774 static bool binder_worklist_empty(struct binder_proc *proc,
775                                   struct list_head *list)
776 {
777         bool ret;
778
779         binder_inner_proc_lock(proc);
780         ret = binder_worklist_empty_ilocked(list);
781         binder_inner_proc_unlock(proc);
782         return ret;
783 }
784
785 /**
786  * binder_enqueue_work_ilocked() - Add an item to the work list
787  * @work:         struct binder_work to add to list
788  * @target_list:  list to add work to
789  *
790  * Adds the work to the specified list. Asserts that work
791  * is not already on a list.
792  *
793  * Requires the proc->inner_lock to be held.
794  */
795 static void
796 binder_enqueue_work_ilocked(struct binder_work *work,
797                            struct list_head *target_list)
798 {
799         BUG_ON(target_list == NULL);
800         BUG_ON(work->entry.next && !list_empty(&work->entry));
801         list_add_tail(&work->entry, target_list);
802 }
803
804 /**
805  * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
806  * @thread:       thread to queue work to
807  * @work:         struct binder_work to add to list
808  *
809  * Adds the work to the todo list of the thread. Doesn't set the process_todo
810  * flag, which means that (if it wasn't already set) the thread will go to
811  * sleep without handling this work when it calls read.
812  *
813  * Requires the proc->inner_lock to be held.
814  */
815 static void
816 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
817                                             struct binder_work *work)
818 {
819         WARN_ON(!list_empty(&thread->waiting_thread_node));
820         binder_enqueue_work_ilocked(work, &thread->todo);
821 }
822
823 /**
824  * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
825  * @thread:       thread to queue work to
826  * @work:         struct binder_work to add to list
827  *
828  * Adds the work to the todo list of the thread, and enables processing
829  * of the todo queue.
830  *
831  * Requires the proc->inner_lock to be held.
832  */
833 static void
834 binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
835                                    struct binder_work *work)
836 {
837         WARN_ON(!list_empty(&thread->waiting_thread_node));
838         binder_enqueue_work_ilocked(work, &thread->todo);
839
840         /* (e)poll-based threads require an explicit wakeup signal when
841          * queuing their own work; they rely on these events to consume
842          * messages without blocking on I/O. Without it, threads risk waiting
843          * indefinitely without handling the work.
844          */
845         if (thread->looper & BINDER_LOOPER_STATE_POLL &&
846             thread->pid == current->pid && !thread->process_todo)
847                 wake_up_interruptible_sync(&thread->wait);
848
849         thread->process_todo = true;
850 }
851
852 /**
853  * binder_enqueue_thread_work() - Add an item to the thread work list
854  * @thread:       thread to queue work to
855  * @work:         struct binder_work to add to list
856  *
857  * Adds the work to the todo list of the thread, and enables processing
858  * of the todo queue.
859  */
860 static void
861 binder_enqueue_thread_work(struct binder_thread *thread,
862                            struct binder_work *work)
863 {
864         binder_inner_proc_lock(thread->proc);
865         binder_enqueue_thread_work_ilocked(thread, work);
866         binder_inner_proc_unlock(thread->proc);
867 }
868
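/*
 * A minimal sketch of the matching pattern for proc-wide work, assuming
 * the caller holds no binder locks yet (binder_wakeup_proc_ilocked() is
 * defined further below; binder_dec_node_nilocked() pairs the same
 * enqueue and wakeup calls under an already-held inner lock):
 *
 *	binder_inner_proc_lock(proc);
 *	binder_enqueue_work_ilocked(work, &proc->todo);
 *	binder_wakeup_proc_ilocked(proc);
 *	binder_inner_proc_unlock(proc);
 *
 * Thread-local work instead goes through binder_enqueue_thread_work()
 * above, which also sets process_todo so the looper does not go back
 * to sleep without handling it.
 */
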
869 static void
870 binder_dequeue_work_ilocked(struct binder_work *work)
871 {
872         list_del_init(&work->entry);
873 }
874
875 /**
876  * binder_dequeue_work() - Removes an item from the work list
877  * @proc:         binder_proc associated with list
878  * @work:         struct binder_work to remove from list
879  *
880  * Removes the specified work item from whatever list it is on.
881  * Can safely be called if work is not on any list.
882  */
883 static void
884 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
885 {
886         binder_inner_proc_lock(proc);
887         binder_dequeue_work_ilocked(work);
888         binder_inner_proc_unlock(proc);
889 }
890
891 static struct binder_work *binder_dequeue_work_head_ilocked(
892                                         struct list_head *list)
893 {
894         struct binder_work *w;
895
896         w = list_first_entry_or_null(list, struct binder_work, entry);
897         if (w)
898                 list_del_init(&w->entry);
899         return w;
900 }
901
902 static void
903 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
904 static void binder_free_thread(struct binder_thread *thread);
905 static void binder_free_proc(struct binder_proc *proc);
906 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
907
908 static bool binder_has_work_ilocked(struct binder_thread *thread,
909                                     bool do_proc_work)
910 {
911         return thread->process_todo ||
912                 thread->looper_need_return ||
913                 (do_proc_work &&
914                  !binder_worklist_empty_ilocked(&thread->proc->todo));
915 }
916
917 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
918 {
919         bool has_work;
920
921         binder_inner_proc_lock(thread->proc);
922         has_work = binder_has_work_ilocked(thread, do_proc_work);
923         binder_inner_proc_unlock(thread->proc);
924
925         return has_work;
926 }
927
928 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
929 {
930         return !thread->transaction_stack &&
931                 binder_worklist_empty_ilocked(&thread->todo) &&
932                 (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
933                                    BINDER_LOOPER_STATE_REGISTERED));
934 }
935
936 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
937                                                bool sync)
938 {
939         struct rb_node *n;
940         struct binder_thread *thread;
941
942         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
943                 thread = rb_entry(n, struct binder_thread, rb_node);
944                 if (thread->looper & BINDER_LOOPER_STATE_POLL &&
945                     binder_available_for_proc_work_ilocked(thread)) {
946                         if (sync)
947                                 wake_up_interruptible_sync(&thread->wait);
948                         else
949                                 wake_up_interruptible(&thread->wait);
950                 }
951         }
952 }
953
954 /**
955  * binder_select_thread_ilocked() - selects a thread for doing proc work.
956  * @proc:       process to select a thread from
957  *
958  * Note that calling this function moves the thread off the waiting_threads
959  * list, so it can only be woken up by the caller of this function, or a
960  * signal. Therefore, callers *should* always wake up the thread this function
961  * returns.
962  *
963  * Return:      If there's a thread currently waiting for process work,
964  *              returns that thread. Otherwise returns NULL.
965  */
966 static struct binder_thread *
967 binder_select_thread_ilocked(struct binder_proc *proc)
968 {
969         struct binder_thread *thread;
970
971         assert_spin_locked(&proc->inner_lock);
972         thread = list_first_entry_or_null(&proc->waiting_threads,
973                                           struct binder_thread,
974                                           waiting_thread_node);
975
976         if (thread)
977                 list_del_init(&thread->waiting_thread_node);
978
979         return thread;
980 }
981
982 /**
983  * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
984  * @proc:       process to wake up a thread in
985  * @thread:     specific thread to wake-up (may be NULL)
986  * @sync:       whether to do a synchronous wake-up
987  *
988  * This function wakes up a thread in the @proc process.
989  * The caller may provide a specific thread to wake-up in
990  * the @thread parameter. If @thread is NULL, this function
991  * will wake up threads that have called poll().
992  *
993  * Note that for this function to work as expected, callers
994  * should first call binder_select_thread() to find a thread
995  * to handle the work (if they don't have a thread already),
996  * and pass the result into the @thread parameter.
997  */
998 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
999                                          struct binder_thread *thread,
1000                                          bool sync)
1001 {
1002         assert_spin_locked(&proc->inner_lock);
1003
1004         if (thread) {
1005                 if (sync)
1006                         wake_up_interruptible_sync(&thread->wait);
1007                 else
1008                         wake_up_interruptible(&thread->wait);
1009                 return;
1010         }
1011
1012         /* Didn't find a thread waiting for proc work; this can happen
1013          * in two scenarios:
1014          * 1. All threads are busy handling transactions
1015          *    In that case, one of those threads should call back into
1016          *    the kernel driver soon and pick up this work.
1017          * 2. Threads are using the (e)poll interface, in which case
1018          *    they may be blocked on the waitqueue without having been
1019          *    added to waiting_threads. For this case, we just iterate
1020          *    over all threads not handling transaction work, and
1021          *    wake them all up. We wake all because we don't know whether
1022          *    a thread that called into (e)poll is handling non-binder
1023          *    work currently.
1024          */
1025         binder_wakeup_poll_threads_ilocked(proc, sync);
1026 }
1027
1028 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
1029 {
1030         struct binder_thread *thread = binder_select_thread_ilocked(proc);
1031
1032         binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
1033 }
1034
1035 static void binder_set_nice(long nice)
1036 {
1037         long min_nice;
1038
1039         if (can_nice(current, nice)) {
1040                 set_user_nice(current, nice);
1041                 return;
1042         }
1043         min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
1044         binder_debug(BINDER_DEBUG_PRIORITY_CAP,
1045                      "%d: nice value %ld not allowed, use %ld instead\n",
1046                       current->pid, nice, min_nice);
1047         set_user_nice(current, min_nice);
1048         if (min_nice <= MAX_NICE)
1049                 return;
1050         binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
1051 }
1052
1053 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
1054                                                    binder_uintptr_t ptr)
1055 {
1056         struct rb_node *n = proc->nodes.rb_node;
1057         struct binder_node *node;
1058
1059         assert_spin_locked(&proc->inner_lock);
1060
1061         while (n) {
1062                 node = rb_entry(n, struct binder_node, rb_node);
1063
1064                 if (ptr < node->ptr)
1065                         n = n->rb_left;
1066                 else if (ptr > node->ptr)
1067                         n = n->rb_right;
1068                 else {
1069                         /*
1070                          * take an implicit weak reference
1071                          * to ensure node stays alive until
1072                          * call to binder_put_node()
1073                          */
1074                         binder_inc_node_tmpref_ilocked(node);
1075                         return node;
1076                 }
1077         }
1078         return NULL;
1079 }
1080
1081 static struct binder_node *binder_get_node(struct binder_proc *proc,
1082                                            binder_uintptr_t ptr)
1083 {
1084         struct binder_node *node;
1085
1086         binder_inner_proc_lock(proc);
1087         node = binder_get_node_ilocked(proc, ptr);
1088         binder_inner_proc_unlock(proc);
1089         return node;
1090 }
1091
1092 static struct binder_node *binder_init_node_ilocked(
1093                                                 struct binder_proc *proc,
1094                                                 struct binder_node *new_node,
1095                                                 struct flat_binder_object *fp)
1096 {
1097         struct rb_node **p = &proc->nodes.rb_node;
1098         struct rb_node *parent = NULL;
1099         struct binder_node *node;
1100         binder_uintptr_t ptr = fp ? fp->binder : 0;
1101         binder_uintptr_t cookie = fp ? fp->cookie : 0;
1102         __u32 flags = fp ? fp->flags : 0;
1103
1104         assert_spin_locked(&proc->inner_lock);
1105
1106         while (*p) {
1107
1108                 parent = *p;
1109                 node = rb_entry(parent, struct binder_node, rb_node);
1110
1111                 if (ptr < node->ptr)
1112                         p = &(*p)->rb_left;
1113                 else if (ptr > node->ptr)
1114                         p = &(*p)->rb_right;
1115                 else {
1116                         /*
1117                          * A matching node is already in
1118                          * the rb tree. Abandon the init
1119                          * and return it.
1120                          */
1121                         binder_inc_node_tmpref_ilocked(node);
1122                         return node;
1123                 }
1124         }
1125         node = new_node;
1126         binder_stats_created(BINDER_STAT_NODE);
1127         node->tmp_refs++;
1128         rb_link_node(&node->rb_node, parent, p);
1129         rb_insert_color(&node->rb_node, &proc->nodes);
1130         node->debug_id = atomic_inc_return(&binder_last_id);
1131         node->proc = proc;
1132         node->ptr = ptr;
1133         node->cookie = cookie;
1134         node->work.type = BINDER_WORK_NODE;
1135         node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1136         node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1137         node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
1138         spin_lock_init(&node->lock);
1139         INIT_LIST_HEAD(&node->work.entry);
1140         INIT_LIST_HEAD(&node->async_todo);
1141         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1142                      "%d:%d node %d u%016llx c%016llx created\n",
1143                      proc->pid, current->pid, node->debug_id,
1144                      (u64)node->ptr, (u64)node->cookie);
1145
1146         return node;
1147 }
1148
1149 static struct binder_node *binder_new_node(struct binder_proc *proc,
1150                                            struct flat_binder_object *fp)
1151 {
1152         struct binder_node *node;
1153         struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
1154
1155         if (!new_node)
1156                 return NULL;
1157         binder_inner_proc_lock(proc);
1158         node = binder_init_node_ilocked(proc, new_node, fp);
1159         binder_inner_proc_unlock(proc);
1160         if (node != new_node)
1161                 /*
1162                  * The node was already added by another thread
1163                  */
1164                 kfree(new_node);
1165
1166         return node;
1167 }
1168
1169 static void binder_free_node(struct binder_node *node)
1170 {
1171         kfree(node);
1172         binder_stats_deleted(BINDER_STAT_NODE);
1173 }
1174
1175 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1176                                     int internal,
1177                                     struct list_head *target_list)
1178 {
1179         struct binder_proc *proc = node->proc;
1180
1181         assert_spin_locked(&node->lock);
1182         if (proc)
1183                 assert_spin_locked(&proc->inner_lock);
1184         if (strong) {
1185                 if (internal) {
1186                         if (target_list == NULL &&
1187                             node->internal_strong_refs == 0 &&
1188                             !(node->proc &&
1189                               node == node->proc->context->binder_context_mgr_node &&
1190                               node->has_strong_ref)) {
1191                                 pr_err("invalid inc strong node for %d\n",
1192                                         node->debug_id);
1193                                 return -EINVAL;
1194                         }
1195                         node->internal_strong_refs++;
1196                 } else
1197                         node->local_strong_refs++;
1198                 if (!node->has_strong_ref && target_list) {
1199                         struct binder_thread *thread = container_of(target_list,
1200                                                     struct binder_thread, todo);
1201                         binder_dequeue_work_ilocked(&node->work);
1202                         BUG_ON(&thread->todo != target_list);
1203                         binder_enqueue_deferred_thread_work_ilocked(thread,
1204                                                                    &node->work);
1205                 }
1206         } else {
1207                 if (!internal)
1208                         node->local_weak_refs++;
1209                 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1210                         if (target_list == NULL) {
1211                                 pr_err("invalid inc weak node for %d\n",
1212                                         node->debug_id);
1213                                 return -EINVAL;
1214                         }
1215                         /*
1216                          * See comment above
1217                          */
1218                         binder_enqueue_work_ilocked(&node->work, target_list);
1219                 }
1220         }
1221         return 0;
1222 }
1223
1224 static int binder_inc_node(struct binder_node *node, int strong, int internal,
1225                            struct list_head *target_list)
1226 {
1227         int ret;
1228
1229         binder_node_inner_lock(node);
1230         ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1231         binder_node_inner_unlock(node);
1232
1233         return ret;
1234 }
1235
1236 static bool binder_dec_node_nilocked(struct binder_node *node,
1237                                      int strong, int internal)
1238 {
1239         struct binder_proc *proc = node->proc;
1240
1241         assert_spin_locked(&node->lock);
1242         if (proc)
1243                 assert_spin_locked(&proc->inner_lock);
1244         if (strong) {
1245                 if (internal)
1246                         node->internal_strong_refs--;
1247                 else
1248                         node->local_strong_refs--;
1249                 if (node->local_strong_refs || node->internal_strong_refs)
1250                         return false;
1251         } else {
1252                 if (!internal)
1253                         node->local_weak_refs--;
1254                 if (node->local_weak_refs || node->tmp_refs ||
1255                                 !hlist_empty(&node->refs))
1256                         return false;
1257         }
1258
1259         if (proc && (node->has_strong_ref || node->has_weak_ref)) {
1260                 if (list_empty(&node->work.entry)) {
1261                         binder_enqueue_work_ilocked(&node->work, &proc->todo);
1262                         binder_wakeup_proc_ilocked(proc);
1263                 }
1264         } else {
1265                 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1266                     !node->local_weak_refs && !node->tmp_refs) {
1267                         if (proc) {
1268                                 binder_dequeue_work_ilocked(&node->work);
1269                                 rb_erase(&node->rb_node, &proc->nodes);
1270                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1271                                              "refless node %d deleted\n",
1272                                              node->debug_id);
1273                         } else {
1274                                 BUG_ON(!list_empty(&node->work.entry));
1275                                 spin_lock(&binder_dead_nodes_lock);
1276                                 /*
1277                                  * tmp_refs could have changed so
1278                                  * check it again
1279                                  */
1280                                 if (node->tmp_refs) {
1281                                         spin_unlock(&binder_dead_nodes_lock);
1282                                         return false;
1283                                 }
1284                                 hlist_del(&node->dead_node);
1285                                 spin_unlock(&binder_dead_nodes_lock);
1286                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1287                                              "dead node %d deleted\n",
1288                                              node->debug_id);
1289                         }
1290                         return true;
1291                 }
1292         }
1293         return false;
1294 }
1295
1296 static void binder_dec_node(struct binder_node *node, int strong, int internal)
1297 {
1298         bool free_node;
1299
1300         binder_node_inner_lock(node);
1301         free_node = binder_dec_node_nilocked(node, strong, internal);
1302         binder_node_inner_unlock(node);
1303         if (free_node)
1304                 binder_free_node(node);
1305 }
1306
1307 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
1308 {
1309         /*
1310          * No call to binder_inc_node() is needed since we
1311          * don't need to inform userspace of any changes to
1312          * tmp_refs
1313          */
1314         node->tmp_refs++;
1315 }
1316
1317 /**
1318  * binder_inc_node_tmpref() - take a temporary reference on node
1319  * @node:       node to reference
1320  *
1321  * Take reference on node to prevent the node from being freed
1322  * while referenced only by a local variable. The inner lock is
1323  * needed to serialize with the node work on the queue (which
1324  * isn't needed after the node is dead). If the node is dead
1325  * (node->proc is NULL), use binder_dead_nodes_lock to protect
1326  * node->tmp_refs against dead-node-only cases where the node
1327  * lock cannot be acquired (e.g. traversing the dead node list to
1328  * print nodes)
1329  */
1330 static void binder_inc_node_tmpref(struct binder_node *node)
1331 {
1332         binder_node_lock(node);
1333         if (node->proc)
1334                 binder_inner_proc_lock(node->proc);
1335         else
1336                 spin_lock(&binder_dead_nodes_lock);
1337         binder_inc_node_tmpref_ilocked(node);
1338         if (node->proc)
1339                 binder_inner_proc_unlock(node->proc);
1340         else
1341                 spin_unlock(&binder_dead_nodes_lock);
1342         binder_node_unlock(node);
1343 }
1344
1345 /**
1346  * binder_dec_node_tmpref() - remove a temporary reference on node
1347  * @node:       node to reference
1348  *
1349  * Release temporary reference on node taken via binder_inc_node_tmpref()
1350  */
1351 static void binder_dec_node_tmpref(struct binder_node *node)
1352 {
1353         bool free_node;
1354
1355         binder_node_inner_lock(node);
1356         if (!node->proc)
1357                 spin_lock(&binder_dead_nodes_lock);
1358         else
1359                 __acquire(&binder_dead_nodes_lock);
1360         node->tmp_refs--;
1361         BUG_ON(node->tmp_refs < 0);
1362         if (!node->proc)
1363                 spin_unlock(&binder_dead_nodes_lock);
1364         else
1365                 __release(&binder_dead_nodes_lock);
1366         /*
1367          * Call binder_dec_node() to check if all refcounts are 0
1368          * and cleanup is needed. Calling with strong=0 and internal=1
1369          * causes no actual reference to be released in binder_dec_node().
1370          * If that changes, a change is needed here too.
1371          */
1372         free_node = binder_dec_node_nilocked(node, 0, 1);
1373         binder_node_inner_unlock(node);
1374         if (free_node)
1375                 binder_free_node(node);
1376 }
1377
1378 static void binder_put_node(struct binder_node *node)
1379 {
1380         binder_dec_node_tmpref(node);
1381 }
1382
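/*
 * A minimal sketch of the temporary-reference pairing, assuming a
 * caller that only holds the node in a local variable:
 *
 *	node = binder_get_node(proc, ptr);	takes an implicit tmp ref
 *	if (node) {
 *		...use node...
 *		binder_put_node(node);		drops the tmp ref
 *	}
 */
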
1383 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1384                                                  u32 desc, bool need_strong_ref)
1385 {
1386         struct rb_node *n = proc->refs_by_desc.rb_node;
1387         struct binder_ref *ref;
1388
1389         while (n) {
1390                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1391
1392                 if (desc < ref->data.desc) {
1393                         n = n->rb_left;
1394                 } else if (desc > ref->data.desc) {
1395                         n = n->rb_right;
1396                 } else if (need_strong_ref && !ref->data.strong) {
1397                         binder_user_error("tried to use weak ref as strong ref\n");
1398                         return NULL;
1399                 } else {
1400                         return ref;
1401                 }
1402         }
1403         return NULL;
1404 }
1405
1406 /**
1407  * binder_get_ref_for_node_olocked() - get the ref associated with given node
1408  * @proc:       binder_proc that owns the ref
1409  * @node:       binder_node of target
1410  * @new_ref:    newly allocated binder_ref to be initialized or %NULL
1411  *
1412  * Look up the ref for the given node and return it if it exists
1413  *
1414  * If it doesn't exist and the caller provides a newly allocated
1415  * ref, initialize the fields of the newly allocated ref and insert
1416  * into the given proc rb_trees and node refs list.
1417  *
1418  * Return:      the ref for node. It is possible that another thread
1419  *              allocated/initialized the ref first in which case the
1420  *              returned ref would be different from the passed-in
1421  *              new_ref. new_ref must be kfree'd by the caller in
1422  *              this case.
1423  */
1424 static struct binder_ref *binder_get_ref_for_node_olocked(
1425                                         struct binder_proc *proc,
1426                                         struct binder_node *node,
1427                                         struct binder_ref *new_ref)
1428 {
1429         struct binder_context *context = proc->context;
1430         struct rb_node **p = &proc->refs_by_node.rb_node;
1431         struct rb_node *parent = NULL;
1432         struct binder_ref *ref;
1433         struct rb_node *n;
1434
1435         while (*p) {
1436                 parent = *p;
1437                 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1438
1439                 if (node < ref->node)
1440                         p = &(*p)->rb_left;
1441                 else if (node > ref->node)
1442                         p = &(*p)->rb_right;
1443                 else
1444                         return ref;
1445         }
1446         if (!new_ref)
1447                 return NULL;
1448
1449         binder_stats_created(BINDER_STAT_REF);
1450         new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1451         new_ref->proc = proc;
1452         new_ref->node = node;
1453         rb_link_node(&new_ref->rb_node_node, parent, p);
1454         rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1455
1456         new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1457         for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1458                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1459                 if (ref->data.desc > new_ref->data.desc)
1460                         break;
1461                 new_ref->data.desc = ref->data.desc + 1;
1462         }
1463
1464         p = &proc->refs_by_desc.rb_node;
1465         while (*p) {
1466                 parent = *p;
1467                 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1468
1469                 if (new_ref->data.desc < ref->data.desc)
1470                         p = &(*p)->rb_left;
1471                 else if (new_ref->data.desc > ref->data.desc)
1472                         p = &(*p)->rb_right;
1473                 else
1474                         BUG();
1475         }
1476         rb_link_node(&new_ref->rb_node_desc, parent, p);
1477         rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1478
1479         binder_node_lock(node);
1480         hlist_add_head(&new_ref->node_entry, &node->refs);
1481
1482         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1483                      "%d new ref %d desc %d for node %d\n",
1484                       proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1485                       node->debug_id);
1486         binder_node_unlock(node);
1487         return new_ref;
1488 }
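
/*
 * Worked example of the descriptor assignment above, with hypothetical
 * numbers: if this proc already holds descriptors {0, 1, 2, 5} and the
 * new ref does not target the context manager node, the candidate desc
 * starts at 1 and, for every existing descriptor that is not larger
 * than it, becomes that descriptor plus one; the walk stops at 5, so
 * the new ref is inserted with desc 3, the lowest unused descriptor
 * greater than 0.
 */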
1489
1490 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1491 {
1492         bool delete_node = false;
1493
1494         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1495                      "%d delete ref %d desc %d for node %d\n",
1496                       ref->proc->pid, ref->data.debug_id, ref->data.desc,
1497                       ref->node->debug_id);
1498
1499         rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1500         rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1501
1502         binder_node_inner_lock(ref->node);
1503         if (ref->data.strong)
1504                 binder_dec_node_nilocked(ref->node, 1, 1);
1505
1506         hlist_del(&ref->node_entry);
1507         delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1508         binder_node_inner_unlock(ref->node);
1509         /*
1510          * Clear ref->node unless we want the caller to free the node
1511          */
1512         if (!delete_node) {
1513                 /*
1514                  * The caller uses ref->node to determine
1515                  * whether the node needs to be freed. Clear
1516                  * it since the node is still alive.
1517                  */
1518                 ref->node = NULL;
1519         }
1520
1521         if (ref->death) {
1522                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1523                              "%d delete ref %d desc %d has death notification\n",
1524                               ref->proc->pid, ref->data.debug_id,
1525                               ref->data.desc);
1526                 binder_dequeue_work(ref->proc, &ref->death->work);
1527                 binder_stats_deleted(BINDER_STAT_DEATH);
1528         }
1529         binder_stats_deleted(BINDER_STAT_REF);
1530 }
1531
1532 /**
1533  * binder_inc_ref_olocked() - increment the ref for given handle
1534  * @ref:         ref to be incremented
1535  * @strong:      if true, strong increment, else weak
1536  * @target_list: list to queue node work on
1537  *
1538  * Increment the ref. @ref->proc->outer_lock must be held on entry
1539  *
1540  * Return: 0, if successful, else errno
1541  */
1542 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1543                                   struct list_head *target_list)
1544 {
1545         int ret;
1546
1547         if (strong) {
1548                 if (ref->data.strong == 0) {
1549                         ret = binder_inc_node(ref->node, 1, 1, target_list);
1550                         if (ret)
1551                                 return ret;
1552                 }
1553                 ref->data.strong++;
1554         } else {
1555                 if (ref->data.weak == 0) {
1556                         ret = binder_inc_node(ref->node, 0, 1, target_list);
1557                         if (ret)
1558                                 return ret;
1559                 }
1560                 ref->data.weak++;
1561         }
1562         return 0;
1563 }
1564
1565 /**
1566  * binder_dec_ref_olocked() - dec the ref for given handle
1567  * @ref:        ref to be decremented
1568  * @strong:     if true, strong decrement, else weak
1569  *
1570  * Decrement the ref. @ref->proc->outer_lock must be held on entry
1571  *
1572  * Return: true if ref is cleaned up and ready to be freed
1573  */
1574 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1575 {
1576         if (strong) {
1577                 if (ref->data.strong == 0) {
1578                         binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1579                                           ref->proc->pid, ref->data.debug_id,
1580                                           ref->data.desc, ref->data.strong,
1581                                           ref->data.weak);
1582                         return false;
1583                 }
1584                 ref->data.strong--;
1585                 if (ref->data.strong == 0)
1586                         binder_dec_node(ref->node, strong, 1);
1587         } else {
1588                 if (ref->data.weak == 0) {
1589                         binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1590                                           ref->proc->pid, ref->data.debug_id,
1591                                           ref->data.desc, ref->data.strong,
1592                                           ref->data.weak);
1593                         return false;
1594                 }
1595                 ref->data.weak--;
1596         }
1597         if (ref->data.strong == 0 && ref->data.weak == 0) {
1598                 binder_cleanup_ref_olocked(ref);
1599                 return true;
1600         }
1601         return false;
1602 }
1603
1604 /**
1605  * binder_get_node_from_ref() - get the node from the given proc/desc
1606  * @proc:       proc containing the ref
1607  * @desc:       the handle associated with the ref
1608  * @need_strong_ref: if true, only return node if ref is strong
1609  * @rdata:      the id/refcount data for the ref
1610  *
1611  * Given a proc and ref handle, return the associated binder_node
1612  *
1613  * Return: a binder_node or NULL if not found or not strong when strong required
1614  */
1615 static struct binder_node *binder_get_node_from_ref(
1616                 struct binder_proc *proc,
1617                 u32 desc, bool need_strong_ref,
1618                 struct binder_ref_data *rdata)
1619 {
1620         struct binder_node *node;
1621         struct binder_ref *ref;
1622
1623         binder_proc_lock(proc);
1624         ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1625         if (!ref)
1626                 goto err_no_ref;
1627         node = ref->node;
1628         /*
1629          * Take an implicit reference on the node to ensure
1630          * it stays alive until the call to binder_put_node()
1631          */
1632         binder_inc_node_tmpref(node);
1633         if (rdata)
1634                 *rdata = ref->data;
1635         binder_proc_unlock(proc);
1636
1637         return node;
1638
1639 err_no_ref:
1640         binder_proc_unlock(proc);
1641         return NULL;
1642 }
1643
1644 /**
1645  * binder_free_ref() - free the binder_ref
1646  * @ref:        ref to free
1647  *
1648  * Free the binder_ref. Free the binder_node indicated by ref->node
1649  * (if non-NULL) and the binder_ref_death indicated by ref->death.
1650  */
1651 static void binder_free_ref(struct binder_ref *ref)
1652 {
1653         if (ref->node)
1654                 binder_free_node(ref->node);
1655         kfree(ref->death);
1656         kfree(ref);
1657 }
1658
1659 /**
1660  * binder_update_ref_for_handle() - inc/dec the ref for given handle
1661  * @proc:       proc containing the ref
1662  * @desc:       the handle associated with the ref
1663  * @increment:  true=inc reference, false=dec reference
1664  * @strong:     true=strong reference, false=weak reference
1665  * @rdata:      the id/refcount data for the ref
1666  *
1667  * Given a proc and ref handle, increment or decrement the ref
1668  * according to "increment" arg.
1669  *
1670  * Return: 0 if successful, else errno
1671  */
1672 static int binder_update_ref_for_handle(struct binder_proc *proc,
1673                 uint32_t desc, bool increment, bool strong,
1674                 struct binder_ref_data *rdata)
1675 {
1676         int ret = 0;
1677         struct binder_ref *ref;
1678         bool delete_ref = false;
1679
1680         binder_proc_lock(proc);
1681         ref = binder_get_ref_olocked(proc, desc, strong);
1682         if (!ref) {
1683                 ret = -EINVAL;
1684                 goto err_no_ref;
1685         }
1686         if (increment)
1687                 ret = binder_inc_ref_olocked(ref, strong, NULL);
1688         else
1689                 delete_ref = binder_dec_ref_olocked(ref, strong);
1690
1691         if (rdata)
1692                 *rdata = ref->data;
1693         binder_proc_unlock(proc);
1694
1695         if (delete_ref)
1696                 binder_free_ref(ref);
1697         return ret;
1698
1699 err_no_ref:
1700         binder_proc_unlock(proc);
1701         return ret;
1702 }
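
/*
 * Hedged sketch of how the userspace refcount commands are expected to
 * map onto binder_update_ref_for_handle() (the actual dispatch happens
 * in binder_thread_write() later in this file):
 *
 *	BC_INCREFS  ->  increment = true,  strong = false
 *	BC_ACQUIRE  ->  increment = true,  strong = true
 *	BC_RELEASE  ->  increment = false, strong = true
 *	BC_DECREFS  ->  increment = false, strong = false
 */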
1703
1704 /**
1705  * binder_dec_ref_for_handle() - dec the ref for given handle
1706  * @proc:       proc containing the ref
1707  * @desc:       the handle associated with the ref
1708  * @strong:     true=strong reference, false=weak reference
1709  * @rdata:      the id/refcount data for the ref
1710  *
1711  * Just calls binder_update_ref_for_handle() to decrement the ref.
1712  *
1713  * Return: 0 if successful, else errno
1714  */
1715 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1716                 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1717 {
1718         return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1719 }
1720
1721
1722 /**
1723  * binder_inc_ref_for_node() - increment the ref for given proc/node
1724  * @proc:        proc containing the ref
1725  * @node:        target node
1726  * @strong:      true=strong reference, false=weak reference
1727  * @target_list: worklist to use if node is incremented
1728  * @rdata:       the id/refcount data for the ref
1729  *
1730  * Given a proc and node, increment the ref. Create the ref if it
1731  * doesn't already exist
1732  *
1733  * Return: 0 if successful, else errno
1734  */
1735 static int binder_inc_ref_for_node(struct binder_proc *proc,
1736                         struct binder_node *node,
1737                         bool strong,
1738                         struct list_head *target_list,
1739                         struct binder_ref_data *rdata)
1740 {
1741         struct binder_ref *ref;
1742         struct binder_ref *new_ref = NULL;
1743         int ret = 0;
1744
1745         binder_proc_lock(proc);
1746         ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1747         if (!ref) {
1748                 binder_proc_unlock(proc);
1749                 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1750                 if (!new_ref)
1751                         return -ENOMEM;
1752                 binder_proc_lock(proc);
1753                 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1754         }
1755         ret = binder_inc_ref_olocked(ref, strong, target_list);
1756         *rdata = ref->data;
1757         if (ret && ref == new_ref) {
1758                 /*
1759                  * Cleanup the failed reference here as the target
1760                  * could now be dead and have already released its
1761                  * references by now. Calling on the new reference
1762                  * with strong=0 and a tmp_refs will not decrement
1763                  * the node. The new_ref gets kfree'd below.
1764                  */
1765                 binder_cleanup_ref_olocked(new_ref);
1766                 ref = NULL;
1767         }
1768
1769         binder_proc_unlock(proc);
1770         if (new_ref && ref != new_ref)
1771                 /*
1772                  * Another thread created the ref first so
1773                  * free the one we allocated
1774                  */
1775                 kfree(new_ref);
1776         return ret;
1777 }
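
/*
 * The unlock/allocate/relock sequence above follows the usual kernel
 * pattern for allocations needed under a spinlock: kzalloc(GFP_KERNEL)
 * may sleep, so it must not be called with proc->outer_lock held. A
 * hedged, generic sketch (lookup()/insert() are placeholders, not
 * driver functions):
 *
 *	spin_lock(&lock);
 *	obj = lookup(key);
 *	if (!obj) {
 *		spin_unlock(&lock);
 *		new = kzalloc(sizeof(*new), GFP_KERNEL);  // may sleep
 *		spin_lock(&lock);
 *		obj = insert(key, new);  // recheck: another thread may have won
 *	}
 *	spin_unlock(&lock);
 *	if (new && obj != new)
 *		kfree(new);  // lost the race; free the unused allocation
 */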
1778
1779 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1780                                            struct binder_transaction *t)
1781 {
1782         BUG_ON(!target_thread);
1783         assert_spin_locked(&target_thread->proc->inner_lock);
1784         BUG_ON(target_thread->transaction_stack != t);
1785         BUG_ON(target_thread->transaction_stack->from != target_thread);
1786         target_thread->transaction_stack =
1787                 target_thread->transaction_stack->from_parent;
1788         t->from = NULL;
1789 }
1790
1791 /**
1792  * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1793  * @thread:     thread to decrement
1794  *
1795  * A thread needs to be kept alive while being used to create or
1796  * handle a transaction. binder_get_txn_from() is used to safely
1797  * extract t->from from a binder_transaction and keep the thread
1798  * indicated by t->from from being freed. When done with that
1799  * binder_thread, this function is called to decrement the
1800  * tmp_ref and free if appropriate (thread has been released
1801  * and no transaction being processed by the driver)
1802  */
1803 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1804 {
1805         /*
1806          * The atomic alone protects the counter value while it cannot
1807          * reach zero or while thread->is_dead is false
1808          */
1809         binder_inner_proc_lock(thread->proc);
1810         atomic_dec(&thread->tmp_ref);
1811         if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1812                 binder_inner_proc_unlock(thread->proc);
1813                 binder_free_thread(thread);
1814                 return;
1815         }
1816         binder_inner_proc_unlock(thread->proc);
1817 }
1818
1819 /**
1820  * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1821  * @proc:       proc to decrement
1822  *
1823  * A binder_proc needs to be kept alive while being used to create or
1824  * handle a transaction. proc->tmp_ref is incremented when
1825  * creating a new transaction or when the binder_proc is in use
1826  * by threads that are being released. When done with the binder_proc,
1827  * this function is called to decrement the counter and free the
1828  * proc if appropriate (proc has been released, all threads have
1829  * been released and not currently in use to process a transaction).
1830  */
1831 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1832 {
1833         binder_inner_proc_lock(proc);
1834         proc->tmp_ref--;
1835         if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1836                         !proc->tmp_ref) {
1837                 binder_inner_proc_unlock(proc);
1838                 binder_free_proc(proc);
1839                 return;
1840         }
1841         binder_inner_proc_unlock(proc);
1842 }
1843
1844 /**
1845  * binder_get_txn_from() - safely extract the "from" thread in transaction
1846  * @t:  binder transaction for t->from
1847  *
1848  * Atomically return the "from" thread and increment the tmp_ref
1849  * count for the thread to ensure it stays alive until
1850  * binder_thread_dec_tmpref() is called.
1851  *
1852  * Return: the value of t->from
1853  */
1854 static struct binder_thread *binder_get_txn_from(
1855                 struct binder_transaction *t)
1856 {
1857         struct binder_thread *from;
1858
1859         spin_lock(&t->lock);
1860         from = t->from;
1861         if (from)
1862                 atomic_inc(&from->tmp_ref);
1863         spin_unlock(&t->lock);
1864         return from;
1865 }
1866
1867 /**
1868  * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1869  * @t:  binder transaction for t->from
1870  *
1871  * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1872  * to guarantee that the thread cannot be released while operating on it.
1873  * The caller must call binder_inner_proc_unlock() to release the inner lock
1874  * as well as call binder_thread_dec_tmpref() to release the reference.
1875  *
1876  * Return: the value of t->from
1877  */
1878 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1879                 struct binder_transaction *t)
1880         __acquires(&t->from->proc->inner_lock)
1881 {
1882         struct binder_thread *from;
1883
1884         from = binder_get_txn_from(t);
1885         if (!from) {
1886                 __acquire(&from->proc->inner_lock);
1887                 return NULL;
1888         }
1889         binder_inner_proc_lock(from->proc);
1890         if (t->from) {
1891                 BUG_ON(from != t->from);
1892                 return from;
1893         }
1894         binder_inner_proc_unlock(from->proc);
1895         __acquire(&from->proc->inner_lock);
1896         binder_thread_dec_tmpref(from);
1897         return NULL;
1898 }
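
/*
 * Hedged caller-side sketch of the contract documented above
 * (binder_send_failed_reply() below is a real user of this pattern):
 *
 *	target_thread = binder_get_txn_from_and_acq_inner(t);
 *	if (target_thread) {
 *		// target_thread->proc->inner_lock is held here
 *		// ... operate on target_thread and t ...
 *		binder_inner_proc_unlock(target_thread->proc);
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 */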
1899
1900 /**
1901  * binder_free_txn_fixups() - free unprocessed fd fixups
1902  * @t:  binder transaction whose fd fixups are to be freed
1903  *
1904  * If the transaction is being torn down prior to being
1905  * processed by the target process, free all of the
1906  * fd fixups and fput the file structs. It is safe to
1907  * call this function after the fixups have been
1908  * processed -- in that case, the list will be empty.
1909  */
1910 static void binder_free_txn_fixups(struct binder_transaction *t)
1911 {
1912         struct binder_txn_fd_fixup *fixup, *tmp;
1913
1914         list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1915                 fput(fixup->file);
1916                 list_del(&fixup->fixup_entry);
1917                 kfree(fixup);
1918         }
1919 }
1920
1921 static void binder_free_transaction(struct binder_transaction *t)
1922 {
1923         struct binder_proc *target_proc = t->to_proc;
1924
1925         if (target_proc) {
1926                 binder_inner_proc_lock(target_proc);
1927                 if (t->buffer)
1928                         t->buffer->transaction = NULL;
1929                 binder_inner_proc_unlock(target_proc);
1930         }
1931         /*
1932          * If the transaction has no target_proc, then
1933          * t->buffer->transaction has already been cleared.
1934          */
1935         binder_free_txn_fixups(t);
1936         kfree(t);
1937         binder_stats_deleted(BINDER_STAT_TRANSACTION);
1938 }
1939
1940 static void binder_send_failed_reply(struct binder_transaction *t,
1941                                      uint32_t error_code)
1942 {
1943         struct binder_thread *target_thread;
1944         struct binder_transaction *next;
1945
1946         BUG_ON(t->flags & TF_ONE_WAY);
1947         while (1) {
1948                 target_thread = binder_get_txn_from_and_acq_inner(t);
1949                 if (target_thread) {
1950                         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1951                                      "send failed reply for transaction %d to %d:%d\n",
1952                                       t->debug_id,
1953                                       target_thread->proc->pid,
1954                                       target_thread->pid);
1955
1956                         binder_pop_transaction_ilocked(target_thread, t);
1957                         if (target_thread->reply_error.cmd == BR_OK) {
1958                                 target_thread->reply_error.cmd = error_code;
1959                                 binder_enqueue_thread_work_ilocked(
1960                                         target_thread,
1961                                         &target_thread->reply_error.work);
1962                                 wake_up_interruptible(&target_thread->wait);
1963                         } else {
1964                                 /*
1965                                  * Cannot get here for normal operation, but
1966                                  * we can if multiple synchronous transactions
1967                                  * are sent without blocking for responses.
1968                                  * Just ignore the 2nd error in this case.
1969                                  */
1970                                 pr_warn("Unexpected reply error: %u\n",
1971                                         target_thread->reply_error.cmd);
1972                         }
1973                         binder_inner_proc_unlock(target_thread->proc);
1974                         binder_thread_dec_tmpref(target_thread);
1975                         binder_free_transaction(t);
1976                         return;
1977                 }
1978                 __release(&target_thread->proc->inner_lock);
1979                 next = t->from_parent;
1980
1981                 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1982                              "send failed reply for transaction %d, target dead\n",
1983                              t->debug_id);
1984
1985                 binder_free_transaction(t);
1986                 if (next == NULL) {
1987                         binder_debug(BINDER_DEBUG_DEAD_BINDER,
1988                                      "reply failed, no target thread at root\n");
1989                         return;
1990                 }
1991                 t = next;
1992                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1993                              "reply failed, no target thread -- retry %d\n",
1994                               t->debug_id);
1995         }
1996 }
1997
1998 /**
1999  * binder_cleanup_transaction() - cleans up undelivered transaction
2000  * @t:          transaction that needs to be cleaned up
2001  * @reason:     reason the transaction wasn't delivered
2002  * @error_code: error to return to caller (if synchronous call)
2003  */
2004 static void binder_cleanup_transaction(struct binder_transaction *t,
2005                                        const char *reason,
2006                                        uint32_t error_code)
2007 {
2008         if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
2009                 binder_send_failed_reply(t, error_code);
2010         } else {
2011                 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2012                         "undelivered transaction %d, %s\n",
2013                         t->debug_id, reason);
2014                 binder_free_transaction(t);
2015         }
2016 }
2017
2018 /**
2019  * binder_get_object() - gets object and checks for valid metadata
2020  * @proc:       binder_proc owning the buffer
2021  * @u:          sender's user pointer to base of buffer
2022  * @buffer:     binder_buffer that we're parsing.
2023  * @offset:     offset in the @buffer at which to validate an object.
2024  * @object:     struct binder_object to read into
2025  *
2026  * Copy the binder object at the given offset into @object. If @u is
2027  * provided then the copy is from the sender's buffer. If not, then
2028  * it is copied from the target's @buffer.
2029  *
2030  * Return:      If there's a valid metadata object at @offset, the
2031  *              size of that object. Otherwise, it returns zero. The object
2032  *              is read into the struct binder_object pointed to by @object.
2033  */
2034 static size_t binder_get_object(struct binder_proc *proc,
2035                                 const void __user *u,
2036                                 struct binder_buffer *buffer,
2037                                 unsigned long offset,
2038                                 struct binder_object *object)
2039 {
2040         size_t read_size;
2041         struct binder_object_header *hdr;
2042         size_t object_size = 0;
2043
2044         read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
2045         if (offset > buffer->data_size || read_size < sizeof(*hdr))
2046                 return 0;
2047         if (u) {
2048                 if (copy_from_user(object, u + offset, read_size))
2049                         return 0;
2050         } else {
2051                 if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
2052                                                   offset, read_size))
2053                         return 0;
2054         }
2055
2056         /* Ok, now see if we read a complete object. */
2057         hdr = &object->hdr;
2058         switch (hdr->type) {
2059         case BINDER_TYPE_BINDER:
2060         case BINDER_TYPE_WEAK_BINDER:
2061         case BINDER_TYPE_HANDLE:
2062         case BINDER_TYPE_WEAK_HANDLE:
2063                 object_size = sizeof(struct flat_binder_object);
2064                 break;
2065         case BINDER_TYPE_FD:
2066                 object_size = sizeof(struct binder_fd_object);
2067                 break;
2068         case BINDER_TYPE_PTR:
2069                 object_size = sizeof(struct binder_buffer_object);
2070                 break;
2071         case BINDER_TYPE_FDA:
2072                 object_size = sizeof(struct binder_fd_array_object);
2073                 break;
2074         default:
2075                 return 0;
2076         }
2077         if (offset <= buffer->data_size - object_size &&
2078             buffer->data_size >= object_size)
2079                 return object_size;
2080         else
2081                 return 0;
2082 }
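
/*
 * Hedged usage sketch for binder_get_object(): given an offset read
 * out of the transaction's offsets array, a hypothetical caller
 * validates and fetches the object before acting on its type:
 *
 *	struct binder_object object;
 *	size_t size;
 *
 *	size = binder_get_object(proc, NULL, buffer, object_offset, &object);
 *	if (!size)
 *		return -EINVAL;		// no valid object at that offset
 *	switch (object.hdr.type) {
 *	case BINDER_TYPE_BINDER:
 *		// ... handle a flat_binder_object ...
 *		break;
 *	}
 */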
2083
2084 /**
2085  * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2086  * @proc:       binder_proc owning the buffer
2087  * @b:          binder_buffer containing the object
2088  * @object:     struct binder_object to read into
2089  * @index:      index in offset array at which the binder_buffer_object is
2090  *              located
2091  * @start_offset: points to the start of the offset array
2092  * @object_offsetp: offset of @object read from @b
2093  * @num_valid:  the number of valid offsets in the offset array
2094  *
2095  * Return:      If @index is within the valid range of the offset array
2096  *              described by @start_offset and @num_valid, and if there's a valid
2097  *              binder_buffer_object at the offset found in index @index
2098  *              of the offset array, that object is returned. Otherwise,
2099  *              %NULL is returned.
2100  *              Note that the offset found in index @index itself is not
2101  *              verified; this function assumes that @num_valid elements
2102  *              from @start_offset were previously verified to have valid offsets.
2103  *              If @object_offsetp is non-NULL, then the offset within
2104  *              @b is written to it.
2105  */
2106 static struct binder_buffer_object *binder_validate_ptr(
2107                                                 struct binder_proc *proc,
2108                                                 struct binder_buffer *b,
2109                                                 struct binder_object *object,
2110                                                 binder_size_t index,
2111                                                 binder_size_t start_offset,
2112                                                 binder_size_t *object_offsetp,
2113                                                 binder_size_t num_valid)
2114 {
2115         size_t object_size;
2116         binder_size_t object_offset;
2117         unsigned long buffer_offset;
2118
2119         if (index >= num_valid)
2120                 return NULL;
2121
2122         buffer_offset = start_offset + sizeof(binder_size_t) * index;
2123         if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2124                                           b, buffer_offset,
2125                                           sizeof(object_offset)))
2126                 return NULL;
2127         object_size = binder_get_object(proc, NULL, b, object_offset, object);
2128         if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
2129                 return NULL;
2130         if (object_offsetp)
2131                 *object_offsetp = object_offset;
2132
2133         return &object->bbo;
2134 }
2135
2136 /**
2137  * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2138  * @proc:               binder_proc owning the buffer
2139  * @b:                  transaction buffer
2140  * @objects_start_offset: offset to start of objects buffer
2141  * @buffer_obj_offset:  offset to binder_buffer_object in which to fix up
2142  * @fixup_offset:       start offset in @b to fix up
2143  * @last_obj_offset:    offset to last binder_buffer_object that we fixed
2144  * @last_min_offset:    minimum fixup offset in object at @last_obj_offset
2145  *
2146  * Return:              %true if a fixup in buffer @b at offset @fixup_offset is
2147  *                      allowed.
2148  *
2149  * For safety reasons, we only allow fixups inside a buffer to happen
2150  * at increasing offsets; additionally, we only allow fixup on the last
2151  * buffer object that was verified, or one of its parents.
2152  *
2153  * Example of what is allowed:
2154  *
2155  * A
2156  *   B (parent = A, offset = 0)
2157  *   C (parent = A, offset = 16)
2158  *     D (parent = C, offset = 0)
2159  *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2160  *
2161  * Examples of what is not allowed:
2162  *
2163  * Decreasing offsets within the same parent:
2164  * A
2165  *   C (parent = A, offset = 16)
2166  *   B (parent = A, offset = 0) // decreasing offset within A
2167  *
2168  * Referring to a parent that wasn't the last object or any of its parents:
2169  * A
2170  *   B (parent = A, offset = 0)
2171  *   C (parent = A, offset = 0)
2172  *   C (parent = A, offset = 16)
2173  *     D (parent = B, offset = 0) // B is not A or any of A's parents
2174  */
2175 static bool binder_validate_fixup(struct binder_proc *proc,
2176                                   struct binder_buffer *b,
2177                                   binder_size_t objects_start_offset,
2178                                   binder_size_t buffer_obj_offset,
2179                                   binder_size_t fixup_offset,
2180                                   binder_size_t last_obj_offset,
2181                                   binder_size_t last_min_offset)
2182 {
2183         if (!last_obj_offset) {
2184                 /* No previously verified buffer object to fix up in */
2185                 return false;
2186         }
2187
2188         while (last_obj_offset != buffer_obj_offset) {
2189                 unsigned long buffer_offset;
2190                 struct binder_object last_object;
2191                 struct binder_buffer_object *last_bbo;
2192                 size_t object_size = binder_get_object(proc, NULL, b,
2193                                                        last_obj_offset,
2194                                                        &last_object);
2195                 if (object_size != sizeof(*last_bbo))
2196                         return false;
2197
2198                 last_bbo = &last_object.bbo;
2199                 /*
2200                  * Safe to retrieve the parent of last_obj, since it
2201                  * was already previously verified by the driver.
2202                  */
2203                 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2204                         return false;
2205                 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
2206                 buffer_offset = objects_start_offset +
2207                         sizeof(binder_size_t) * last_bbo->parent;
2208                 if (binder_alloc_copy_from_buffer(&proc->alloc,
2209                                                   &last_obj_offset,
2210                                                   b, buffer_offset,
2211                                                   sizeof(last_obj_offset)))
2212                         return false;
2213         }
2214         return (fixup_offset >= last_min_offset);
2215 }
2216
2217 /**
2218  * struct binder_task_work_cb - for deferred close
2219  *
2220  * @twork:                callback_head for task work
2221  * @file:                file to fput
2222  *
2223  * Structure to pass task work to be handled after
2224  * returning from binder_ioctl() via task_work_add().
2225  */
2226 struct binder_task_work_cb {
2227         struct callback_head twork;
2228         struct file *file;
2229 };
2230
2231 /**
2232  * binder_do_fd_close() - close list of file descriptors
2233  * @twork:      callback head for task work
2234  *
2235  * It is not safe to call ksys_close() during the binder_ioctl()
2236  * function if there is a chance that binder's own file descriptor
2237  * might be closed. This is to meet the requirements for using
2238  * fdget() (see comments for __fget_light()). Therefore use
2239  * task_work_add() to schedule the close operation once we have
2240  * returned from binder_ioctl(). This function is a callback
2241  * for that mechanism and does the final fput() on the file
2242  * that was pinned for the closed file descriptor.
2243  */
2244 static void binder_do_fd_close(struct callback_head *twork)
2245 {
2246         struct binder_task_work_cb *twcb = container_of(twork,
2247                         struct binder_task_work_cb, twork);
2248
2249         fput(twcb->file);
2250         kfree(twcb);
2251 }
2252
2253 /**
2254  * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2255  * @fd:         file-descriptor to close
2256  *
2257  * See comments in binder_do_fd_close(). This function is used to schedule
2258  * a file-descriptor to be closed after returning from binder_ioctl().
2259  */
2260 static void binder_deferred_fd_close(int fd)
2261 {
2262         struct binder_task_work_cb *twcb;
2263
2264         twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2265         if (!twcb)
2266                 return;
2267         init_task_work(&twcb->twork, binder_do_fd_close);
2268         close_fd_get_file(fd, &twcb->file);
2269         if (twcb->file) {
2270                 filp_close(twcb->file, current->files);
2271                 task_work_add(current, &twcb->twork, TWA_RESUME);
2272         } else {
2273                 kfree(twcb);
2274         }
2275 }
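
/*
 * Hedged sketch of the deferred-close rule described above: code that
 * runs in binder_ioctl() context does not close an installed fd
 * directly, it schedules the close and makes sure the thread returns
 * to userspace so the task work can run:
 *
 *	binder_deferred_fd_close(fd);	// close completes via task_work
 *	if (thread)
 *		thread->looper_need_return = true;
 */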
2276
2277 static void binder_transaction_buffer_release(struct binder_proc *proc,
2278                                               struct binder_thread *thread,
2279                                               struct binder_buffer *buffer,
2280                                               binder_size_t off_end_offset,
2281                                               bool is_failure)
2282 {
2283         int debug_id = buffer->debug_id;
2284         binder_size_t off_start_offset, buffer_offset;
2285
2286         binder_debug(BINDER_DEBUG_TRANSACTION,
2287                      "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2288                      proc->pid, buffer->debug_id,
2289                      buffer->data_size, buffer->offsets_size,
2290                      (unsigned long long)off_end_offset);
2291
2292         if (buffer->target_node)
2293                 binder_dec_node(buffer->target_node, 1, 0);
2294
2295         off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2296
2297         for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2298              buffer_offset += sizeof(binder_size_t)) {
2299                 struct binder_object_header *hdr;
2300                 size_t object_size = 0;
2301                 struct binder_object object;
2302                 binder_size_t object_offset;
2303
2304                 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2305                                                    buffer, buffer_offset,
2306                                                    sizeof(object_offset)))
2307                         object_size = binder_get_object(proc, NULL, buffer,
2308                                                         object_offset, &object);
2309                 if (object_size == 0) {
2310                         pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2311                                debug_id, (u64)object_offset, buffer->data_size);
2312                         continue;
2313                 }
2314                 hdr = &object.hdr;
2315                 switch (hdr->type) {
2316                 case BINDER_TYPE_BINDER:
2317                 case BINDER_TYPE_WEAK_BINDER: {
2318                         struct flat_binder_object *fp;
2319                         struct binder_node *node;
2320
2321                         fp = to_flat_binder_object(hdr);
2322                         node = binder_get_node(proc, fp->binder);
2323                         if (node == NULL) {
2324                                 pr_err("transaction release %d bad node %016llx\n",
2325                                        debug_id, (u64)fp->binder);
2326                                 break;
2327                         }
2328                         binder_debug(BINDER_DEBUG_TRANSACTION,
2329                                      "        node %d u%016llx\n",
2330                                      node->debug_id, (u64)node->ptr);
2331                         binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2332                                         0);
2333                         binder_put_node(node);
2334                 } break;
2335                 case BINDER_TYPE_HANDLE:
2336                 case BINDER_TYPE_WEAK_HANDLE: {
2337                         struct flat_binder_object *fp;
2338                         struct binder_ref_data rdata;
2339                         int ret;
2340
2341                         fp = to_flat_binder_object(hdr);
2342                         ret = binder_dec_ref_for_handle(proc, fp->handle,
2343                                 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2344
2345                         if (ret) {
2346                                 pr_err("transaction release %d bad handle %d, ret = %d\n",
2347                                  debug_id, fp->handle, ret);
2348                                 break;
2349                         }
2350                         binder_debug(BINDER_DEBUG_TRANSACTION,
2351                                      "        ref %d desc %d\n",
2352                                      rdata.debug_id, rdata.desc);
2353                 } break;
2354
2355                 case BINDER_TYPE_FD: {
2356                         /*
2357                          * No need to close the file here since user-space
2358                          * closes it for successfully delivered
2359                          * transactions. For transactions that weren't
2360                          * delivered, the new fd was never allocated so
2361                          * there is no need to close and the fput on the
2362                          * file is done when the transaction is torn
2363                          * down.
2364                          */
2365                 } break;
2366                 case BINDER_TYPE_PTR:
2367                         /*
2368                          * Nothing to do here, this will get cleaned up when the
2369                          * transaction buffer gets freed
2370                          */
2371                         break;
2372                 case BINDER_TYPE_FDA: {
2373                         struct binder_fd_array_object *fda;
2374                         struct binder_buffer_object *parent;
2375                         struct binder_object ptr_object;
2376                         binder_size_t fda_offset;
2377                         size_t fd_index;
2378                         binder_size_t fd_buf_size;
2379                         binder_size_t num_valid;
2380
2381                         if (is_failure) {
2382                                 /*
2383                                  * The fd fixups have not been applied so no
2384                                  * fds need to be closed.
2385                                  */
2386                                 continue;
2387                         }
2388
2389                         num_valid = (buffer_offset - off_start_offset) /
2390                                                 sizeof(binder_size_t);
2391                         fda = to_binder_fd_array_object(hdr);
2392                         parent = binder_validate_ptr(proc, buffer, &ptr_object,
2393                                                      fda->parent,
2394                                                      off_start_offset,
2395                                                      NULL,
2396                                                      num_valid);
2397                         if (!parent) {
2398                                 pr_err("transaction release %d bad parent offset\n",
2399                                        debug_id);
2400                                 continue;
2401                         }
2402                         fd_buf_size = sizeof(u32) * fda->num_fds;
2403                         if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2404                                 pr_err("transaction release %d invalid number of fds (%lld)\n",
2405                                        debug_id, (u64)fda->num_fds);
2406                                 continue;
2407                         }
2408                         if (fd_buf_size > parent->length ||
2409                             fda->parent_offset > parent->length - fd_buf_size) {
2410                                 /* No space for all file descriptors here. */
2411                                 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2412                                        debug_id, (u64)fda->num_fds);
2413                                 continue;
2414                         }
2415                         /*
2416                          * the source data for binder_buffer_object is visible
2417                          * to user-space and the @buffer element is the user
2418                          * pointer to the buffer_object containing the fd_array.
2419                          * Convert the address to an offset relative to
2420                          * the base of the transaction buffer.
2421                          */
2422                         fda_offset =
2423                             (parent->buffer - (uintptr_t)buffer->user_data) +
2424                             fda->parent_offset;
2425                         for (fd_index = 0; fd_index < fda->num_fds;
2426                              fd_index++) {
2427                                 u32 fd;
2428                                 int err;
2429                                 binder_size_t offset = fda_offset +
2430                                         fd_index * sizeof(fd);
2431
2432                                 err = binder_alloc_copy_from_buffer(
2433                                                 &proc->alloc, &fd, buffer,
2434                                                 offset, sizeof(fd));
2435                                 WARN_ON(err);
2436                                 if (!err) {
2437                                         binder_deferred_fd_close(fd);
2438                                         /*
2439                                          * Need to make sure the thread goes
2440                                          * back to userspace to complete the
2441                                          * deferred close
2442                                          */
2443                                         if (thread)
2444                                                 thread->looper_need_return = true;
2445                                 }
2446                         }
2447                 } break;
2448                 default:
2449                         pr_err("transaction release %d bad object type %x\n",
2450                                 debug_id, hdr->type);
2451                         break;
2452                 }
2453         }
2454 }
2455
2456 /* Clean up all the objects in the buffer */
2457 static inline void binder_release_entire_buffer(struct binder_proc *proc,
2458                                                 struct binder_thread *thread,
2459                                                 struct binder_buffer *buffer,
2460                                                 bool is_failure)
2461 {
2462         binder_size_t off_end_offset;
2463
2464         off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
2465         off_end_offset += buffer->offsets_size;
2466
2467         binder_transaction_buffer_release(proc, thread, buffer,
2468                                           off_end_offset, is_failure);
2469 }
2470
2471 static int binder_translate_binder(struct flat_binder_object *fp,
2472                                    struct binder_transaction *t,
2473                                    struct binder_thread *thread)
2474 {
2475         struct binder_node *node;
2476         struct binder_proc *proc = thread->proc;
2477         struct binder_proc *target_proc = t->to_proc;
2478         struct binder_ref_data rdata;
2479         int ret = 0;
2480
2481         node = binder_get_node(proc, fp->binder);
2482         if (!node) {
2483                 node = binder_new_node(proc, fp);
2484                 if (!node)
2485                         return -ENOMEM;
2486         }
2487         if (fp->cookie != node->cookie) {
2488                 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2489                                   proc->pid, thread->pid, (u64)fp->binder,
2490                                   node->debug_id, (u64)fp->cookie,
2491                                   (u64)node->cookie);
2492                 ret = -EINVAL;
2493                 goto done;
2494         }
2495         if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2496                 ret = -EPERM;
2497                 goto done;
2498         }
2499
2500         ret = binder_inc_ref_for_node(target_proc, node,
2501                         fp->hdr.type == BINDER_TYPE_BINDER,
2502                         &thread->todo, &rdata);
2503         if (ret)
2504                 goto done;
2505
2506         if (fp->hdr.type == BINDER_TYPE_BINDER)
2507                 fp->hdr.type = BINDER_TYPE_HANDLE;
2508         else
2509                 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2510         fp->binder = 0;
2511         fp->handle = rdata.desc;
2512         fp->cookie = 0;
2513
2514         trace_binder_transaction_node_to_ref(t, node, &rdata);
2515         binder_debug(BINDER_DEBUG_TRANSACTION,
2516                      "        node %d u%016llx -> ref %d desc %d\n",
2517                      node->debug_id, (u64)node->ptr,
2518                      rdata.debug_id, rdata.desc);
2519 done:
2520         binder_put_node(node);
2521         return ret;
2522 }
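
/*
 * Worked example for the translation above, with hypothetical values:
 * a sender passes { .hdr.type = BINDER_TYPE_BINDER, .binder = <local
 * pointer>, .cookie = <local cookie> }; after binder_translate_binder()
 * the object delivered to the target reads { .hdr.type =
 * BINDER_TYPE_HANDLE, .handle = rdata.desc, .binder = 0, .cookie = 0 },
 * so the receiving process only ever sees a descriptor, never the
 * sender's addresses.
 */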
2523
2524 static int binder_translate_handle(struct flat_binder_object *fp,
2525                                    struct binder_transaction *t,
2526                                    struct binder_thread *thread)
2527 {
2528         struct binder_proc *proc = thread->proc;
2529         struct binder_proc *target_proc = t->to_proc;
2530         struct binder_node *node;
2531         struct binder_ref_data src_rdata;
2532         int ret = 0;
2533
2534         node = binder_get_node_from_ref(proc, fp->handle,
2535                         fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2536         if (!node) {
2537                 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2538                                   proc->pid, thread->pid, fp->handle);
2539                 return -EINVAL;
2540         }
2541         if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2542                 ret = -EPERM;
2543                 goto done;
2544         }
2545
2546         binder_node_lock(node);
2547         if (node->proc == target_proc) {
2548                 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2549                         fp->hdr.type = BINDER_TYPE_BINDER;
2550                 else
2551                         fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2552                 fp->binder = node->ptr;
2553                 fp->cookie = node->cookie;
2554                 if (node->proc)
2555                         binder_inner_proc_lock(node->proc);
2556                 else
2557                         __acquire(&node->proc->inner_lock);
2558                 binder_inc_node_nilocked(node,
2559                                          fp->hdr.type == BINDER_TYPE_BINDER,
2560                                          0, NULL);
2561                 if (node->proc)
2562                         binder_inner_proc_unlock(node->proc);
2563                 else
2564                         __release(&node->proc->inner_lock);
2565                 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2566                 binder_debug(BINDER_DEBUG_TRANSACTION,
2567                              "        ref %d desc %d -> node %d u%016llx\n",
2568                              src_rdata.debug_id, src_rdata.desc, node->debug_id,
2569                              (u64)node->ptr);
2570                 binder_node_unlock(node);
2571         } else {
2572                 struct binder_ref_data dest_rdata;
2573
2574                 binder_node_unlock(node);
2575                 ret = binder_inc_ref_for_node(target_proc, node,
2576                                 fp->hdr.type == BINDER_TYPE_HANDLE,
2577                                 NULL, &dest_rdata);
2578                 if (ret)
2579                         goto done;
2580
2581                 fp->binder = 0;
2582                 fp->handle = dest_rdata.desc;
2583                 fp->cookie = 0;
2584                 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2585                                                     &dest_rdata);
2586                 binder_debug(BINDER_DEBUG_TRANSACTION,
2587                              "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2588                              src_rdata.debug_id, src_rdata.desc,
2589                              dest_rdata.debug_id, dest_rdata.desc,
2590                              node->debug_id);
2591         }
2592 done:
2593         binder_put_node(node);
2594         return ret;
2595 }
2596
2597 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2598                                struct binder_transaction *t,
2599                                struct binder_thread *thread,
2600                                struct binder_transaction *in_reply_to)
2601 {
2602         struct binder_proc *proc = thread->proc;
2603         struct binder_proc *target_proc = t->to_proc;
2604         struct binder_txn_fd_fixup *fixup;
2605         struct file *file;
2606         int ret = 0;
2607         bool target_allows_fd;
2608
2609         if (in_reply_to)
2610                 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2611         else
2612                 target_allows_fd = t->buffer->target_node->accept_fds;
2613         if (!target_allows_fd) {
2614                 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2615                                   proc->pid, thread->pid,
2616                                   in_reply_to ? "reply" : "transaction",
2617                                   fd);
2618                 ret = -EPERM;
2619                 goto err_fd_not_accepted;
2620         }
2621
2622         file = fget(fd);
2623         if (!file) {
2624                 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2625                                   proc->pid, thread->pid, fd);
2626                 ret = -EBADF;
2627                 goto err_fget;
2628         }
2629         ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2630         if (ret < 0) {
2631                 ret = -EPERM;
2632                 goto err_security;
2633         }
2634
2635         /*
2636          * Add fixup record for this transaction. The allocation
2637          * of the fd in the target needs to be done from a
2638          * target thread.
2639          */
2640         fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2641         if (!fixup) {
2642                 ret = -ENOMEM;
2643                 goto err_alloc;
2644         }
2645         fixup->file = file;
2646         fixup->offset = fd_offset;
2647         trace_binder_transaction_fd_send(t, fd, fixup->offset);
2648         list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2649
2650         return ret;
2651
2652 err_alloc:
2653 err_security:
2654         fput(file);
2655 err_fget:
2656 err_fd_not_accepted:
2657         return ret;
2658 }
2659
2660 /**
2661  * struct binder_ptr_fixup - data to be fixed-up in target buffer
2662  * @offset:     offset in target buffer to fixup
2663  * @skip_size:  bytes to skip in copy (fixup will be written later)
2664  * @fixup_data: data to write at fixup offset
2665  * @node:       list node
2666  *
2667  * This is used for the pointer fixup list (pf) which is created and consumed
2668  * during binder_transaction() and is only accessed locally. No
2669  * locking is necessary.
2670  *
2671  * The list is ordered by @offset.
2672  */
2673 struct binder_ptr_fixup {
2674         binder_size_t offset;
2675         size_t skip_size;
2676         binder_uintptr_t fixup_data;
2677         struct list_head node;
2678 };
2679
2680 /**
2681  * struct binder_sg_copy - scatter-gather data to be copied
2682  * @offset:             offset in target buffer
2683  * @sender_uaddr:       user address in source buffer
2684  * @length:             bytes to copy
2685  * @node:               list node
2686  *
2687  * This is used for the sg copy list (sgc) which is created and consumed
2688  * during binder_transaction() and is only accessed locally. No
2689  * locking is necessary.
2690  *
2691  * The list is ordered by @offset.
2692  */
2693 struct binder_sg_copy {
2694         binder_size_t offset;
2695         const void __user *sender_uaddr;
2696         size_t length;
2697         struct list_head node;
2698 };
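
/*
 * Illustrative sketch of the two lists above, with invented values: a
 * 32-byte scatter-gather payload that lands at target buffer offset
 * 0x100, where a child object's pointer must be patched at offset 8 of
 * that payload, would typically be described as
 *
 *	sgc_head: { .offset = 0x100, .sender_uaddr = <user address>,
 *		    .length = 32 }
 *	pf_head:  { .offset = 0x108, .fixup_data = <pointer rewritten
 *		    for the target>, .skip_size = 0 }
 *
 * i.e. the sg entry records what to copy and the fixup entry records
 * which bytes inside that range must be overwritten (or skipped)
 * rather than copied verbatim from the sender.
 */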
2699
2700 /**
2701  * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2702  * @alloc:      binder_alloc associated with @buffer
2703  * @buffer:     binder buffer in target process
2704  * @sgc_head:   list_head of scatter-gather copy list
2705  * @pf_head:    list_head of pointer fixup list
2706  *
2707  * Processes all elements of @sgc_head, applying fixups from @pf_head
2708  * and copying the scatter-gather data from the source process' user
2709  * buffer to the target's buffer. It is expected that the list creation
2710  * and processing all occurs during binder_transaction() so these lists
2711  * are only accessed in local context.
2712  *
2713  * Return: 0=success, else -errno
2714  */
2715 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2716                                          struct binder_buffer *buffer,
2717                                          struct list_head *sgc_head,
2718                                          struct list_head *pf_head)
2719 {
2720         int ret = 0;
2721         struct binder_sg_copy *sgc, *tmpsgc;
2722         struct binder_ptr_fixup *tmppf;
2723         struct binder_ptr_fixup *pf =
2724                 list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2725                                          node);
2726
2727         list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2728                 size_t bytes_copied = 0;
2729
2730                 while (bytes_copied < sgc->length) {
2731                         size_t copy_size;
2732                         size_t bytes_left = sgc->length - bytes_copied;
2733                         size_t offset = sgc->offset + bytes_copied;
2734
2735                         /*
2736                          * We copy up to the fixup (pointed to by pf)
2737                          */
2738                         copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2739                                        : bytes_left;
2740                         if (!ret && copy_size)
2741                                 ret = binder_alloc_copy_user_to_buffer(
2742                                                 alloc, buffer,
2743                                                 offset,
2744                                                 sgc->sender_uaddr + bytes_copied,
2745                                                 copy_size);
2746                         bytes_copied += copy_size;
2747                         if (copy_size != bytes_left) {
2748                                 BUG_ON(!pf);
2749                                 /* we stopped at a fixup offset */
2750                                 if (pf->skip_size) {
2751                                         /*
2752                                          * we are just skipping. This is for
2753                                          * BINDER_TYPE_FDA where the translated
2754                                          * fds will be fixed up when we get
2755                                          * to target context.
2756                                          */
2757                                         bytes_copied += pf->skip_size;
2758                                 } else {
2759                                         /* apply the fixup indicated by pf */
2760                                         if (!ret)
2761                                                 ret = binder_alloc_copy_to_buffer(
2762                                                         alloc, buffer,
2763                                                         pf->offset,
2764                                                         &pf->fixup_data,
2765                                                         sizeof(pf->fixup_data));
2766                                         bytes_copied += sizeof(pf->fixup_data);
2767                                 }
2768                                 list_del(&pf->node);
2769                                 kfree(pf);
2770                                 pf = list_first_entry_or_null(pf_head,
2771                                                 struct binder_ptr_fixup, node);
2772                         }
2773                 }
2774                 list_del(&sgc->node);
2775                 kfree(sgc);
2776         }
2777         list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2778                 BUG_ON(pf->skip_size == 0);
2779                 list_del(&pf->node);
2780                 kfree(pf);
2781         }
2782         BUG_ON(!list_empty(sgc_head));
2783
2784         return ret > 0 ? -EINVAL : ret;
2785 }
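
/*
 * Worked example of the loop above, with invented offsets: given one
 * sg entry { .offset = 0x100, .length = 32 } and one fixup
 * { .offset = 0x108, .skip_size = 0 }, the copy proceeds as follows:
 *
 *	1) copy_size = 0x108 - 0x100 = 8: copy 8 user bytes to 0x100
 *	2) the copy stopped short of the sg length, so fixup_data is
 *	   written at 0x108 and bytes_copied advances by 8
 *	3) copy the remaining 16 user bytes to 0x110
 *
 * A fixup with a non-zero skip_size (BINDER_TYPE_FDA) behaves the same
 * except that step 2 writes nothing; the skipped bytes are filled in
 * later, from the target's context, when the fd fixups are applied.
 */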
2786
2787 /**
2788  * binder_cleanup_deferred_txn_lists() - free specified lists
2789  * @sgc_head:   list_head of scatter-gather copy list
2790  * @pf_head:    list_head of pointer fixup list
2791  *
2792  * Called to clean up @sgc_head and @pf_head if there is an
2793  * error.
2794  */
2795 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2796                                               struct list_head *pf_head)
2797 {
2798         struct binder_sg_copy *sgc, *tmpsgc;
2799         struct binder_ptr_fixup *pf, *tmppf;
2800
2801         list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2802                 list_del(&sgc->node);
2803                 kfree(sgc);
2804         }
2805         list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2806                 list_del(&pf->node);
2807                 kfree(pf);
2808         }
2809 }
2810
2811 /**
2812  * binder_defer_copy() - queue a scatter-gather buffer for copy
2813  * @sgc_head:           list_head of scatter-gather copy list
2814  * @offset:             binder buffer offset in target process
2815  * @sender_uaddr:       user address in source process
2816  * @length:             bytes to copy
2817  *
2818  * Specify a scatter-gather block to be copied. The actual copy must
2819  * be deferred until all the needed fixups are identified and queued.
2820  * Then the copy and fixups are done together so un-translated values
2821  * from the source are never visible in the target buffer.
2822  *
2823  * We are guaranteed that repeated calls to this function will have
2824  * monotonically increasing @offset values so the list will naturally
2825  * be ordered.
2826  *
2827  * Return: 0=success, else -errno
2828  */
2829 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2830                              const void __user *sender_uaddr, size_t length)
2831 {
2832         struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2833
2834         if (!bc)
2835                 return -ENOMEM;
2836
2837         bc->offset = offset;
2838         bc->sender_uaddr = sender_uaddr;
2839         bc->length = length;
2840         INIT_LIST_HEAD(&bc->node);
2841
2842         /*
2843          * We are guaranteed that the deferred copies are in-order
2844          * so just add to the tail.
2845          */
2846         list_add_tail(&bc->node, sgc_head);
2847
2848         return 0;
2849 }
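
/*
 * Usage sketch with invented parameters: for a BINDER_TYPE_PTR object
 * whose 64-byte payload lives at user address ubuf and is to land at
 * sg offset 0x200 of the target buffer, binder_transaction() queues
 * the copy with something like
 *
 *	binder_defer_copy(&sgc_head, 0x200, ubuf, 64);
 *
 * and only performs it in binder_do_deferred_txn_copies() once every
 * fixup touching that range has been recorded.
 */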
2850
2851 /**
2852  * binder_add_fixup() - queue a fixup to be applied to sg copy
2853  * @pf_head:    list_head of binder ptr fixup list
2854  * @offset:     binder buffer offset in target process
2855  * @fixup:      bytes to be copied for fixup
2856  * @skip_size:  bytes to skip when copying (fixup will be applied later)
2857  *
2858  * Add the specified fixup to a list ordered by @offset. When copying
2859  * the scatter-gather buffers, the fixup will be copied instead of
2860  * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2861  * will be applied later (in target process context), so we just skip
2862  * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2863  * value in @fixup.
2864  *
2865  * This function is called *mostly* in @offset order, but there are
2866  * exceptions. Since out-of-order inserts are relatively uncommon,
2867  * we insert the new element by searching backward from the tail of
2868  * the list.
2869  *
2870  * Return: 0=success, else -errno
2871  */
2872 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2873                             binder_uintptr_t fixup, size_t skip_size)
2874 {
2875         struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2876         struct binder_ptr_fixup *tmppf;
2877
2878         if (!pf)
2879                 return -ENOMEM;
2880
2881         pf->offset = offset;
2882         pf->fixup_data = fixup;
2883         pf->skip_size = skip_size;
2884         INIT_LIST_HEAD(&pf->node);
2885
2886         /* Fixups are *mostly* added in-order, but there are some
2887          * exceptions. Look backwards through list for insertion point.
2888          */
2889         list_for_each_entry_reverse(tmppf, pf_head, node) {
2890                 if (tmppf->offset < pf->offset) {
2891                         list_add(&pf->node, &tmppf->node);
2892                         return 0;
2893                 }
2894         }
2895         /*
2896          * if we get here, then the new offset is the lowest so
2897          * insert at the head
2898          */
2899         list_add(&pf->node, pf_head);
2900         return 0;
2901 }
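
/*
 * Insertion-order sketch with invented offsets: if fixups arrive for
 * offsets 0x20, 0x40 and then 0x30, the backward walk above compares
 * 0x40 (not less than 0x30), then 0x20 (less), and links the new entry
 * after 0x20, giving the ordered list 0x20 -> 0x30 -> 0x40. Only when
 * the new offset is smaller than every existing one does the walk fall
 * through to list_add() at the head.
 */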
2902
2903 static int binder_translate_fd_array(struct list_head *pf_head,
2904                                      struct binder_fd_array_object *fda,
2905                                      const void __user *sender_ubuffer,
2906                                      struct binder_buffer_object *parent,
2907                                      struct binder_buffer_object *sender_uparent,
2908                                      struct binder_transaction *t,
2909                                      struct binder_thread *thread,
2910                                      struct binder_transaction *in_reply_to)
2911 {
2912         binder_size_t fdi, fd_buf_size;
2913         binder_size_t fda_offset;
2914         const void __user *sender_ufda_base;
2915         struct binder_proc *proc = thread->proc;
2916         int ret;
2917
2918         if (fda->num_fds == 0)
2919                 return 0;
2920
2921         fd_buf_size = sizeof(u32) * fda->num_fds;
2922         if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2923                 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2924                                   proc->pid, thread->pid, (u64)fda->num_fds);
2925                 return -EINVAL;
2926         }
2927         if (fd_buf_size > parent->length ||
2928             fda->parent_offset > parent->length - fd_buf_size) {
2929                 /* No space for all file descriptors here. */
2930                 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2931                                   proc->pid, thread->pid, (u64)fda->num_fds);
2932                 return -EINVAL;
2933         }
2934         /*
2935          * the source data for binder_buffer_object is visible
2936          * to user-space and the @buffer element is the user
2937          * pointer to the buffer_object containing the fd_array.
2938          * Convert the address to an offset relative to
2939          * the base of the transaction buffer.
2940          */
2941         fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2942                 fda->parent_offset;
2943         sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2944                                 fda->parent_offset;
2945
2946         if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2947             !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2948                 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2949                                   proc->pid, thread->pid);
2950                 return -EINVAL;
2951         }
2952         ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2953         if (ret)
2954                 return ret;
2955
2956         for (fdi = 0; fdi < fda->num_fds; fdi++) {
2957                 u32 fd;
2958                 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2959                 binder_size_t sender_uoffset = fdi * sizeof(fd);
2960
2961                 ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2962                 if (!ret)
2963                         ret = binder_translate_fd(fd, offset, t, thread,
2964                                                   in_reply_to);
2965                 if (ret)
2966                         return ret > 0 ? -EINVAL : ret;
2967         }
2968         return 0;
2969 }
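
/*
 * Layout sketch with invented numbers: an fd array with num_fds == 3
 * whose parent buffer lands at target offset 0x80 and whose
 * parent_offset is 0x10 yields fda_offset == 0x90. One skip-fixup of
 * 3 * sizeof(u32) bytes is queued at 0x90, so the sender's raw fd
 * values are never copied into the target, and binder_translate_fd()
 * is then called per entry, adding its own fd fixup record (queued on
 * t->fd_fixups) for each of the offsets 0x90, 0x94 and 0x98.
 */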
2970
2971 static int binder_fixup_parent(struct list_head *pf_head,
2972                                struct binder_transaction *t,
2973                                struct binder_thread *thread,
2974                                struct binder_buffer_object *bp,
2975                                binder_size_t off_start_offset,
2976                                binder_size_t num_valid,
2977                                binder_size_t last_fixup_obj_off,
2978                                binder_size_t last_fixup_min_off)
2979 {
2980         struct binder_buffer_object *parent;
2981         struct binder_buffer *b = t->buffer;
2982         struct binder_proc *proc = thread->proc;
2983         struct binder_proc *target_proc = t->to_proc;
2984         struct binder_object object;
2985         binder_size_t buffer_offset;
2986         binder_size_t parent_offset;
2987
2988         if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2989                 return 0;
2990
2991         parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2992                                      off_start_offset, &parent_offset,
2993                                      num_valid);
2994         if (!parent) {
2995                 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2996                                   proc->pid, thread->pid);
2997                 return -EINVAL;
2998         }
2999
3000         if (!binder_validate_fixup(target_proc, b, off_start_offset,
3001                                    parent_offset, bp->parent_offset,
3002                                    last_fixup_obj_off,
3003                                    last_fixup_min_off)) {
3004                 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3005                                   proc->pid, thread->pid);
3006                 return -EINVAL;
3007         }
3008
3009         if (parent->length < sizeof(binder_uintptr_t) ||
3010             bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
3011                 /* No space for a pointer here! */
3012                 binder_user_error("%d:%d got transaction with invalid parent offset\n",
3013                                   proc->pid, thread->pid);
3014                 return -EINVAL;
3015         }
3016         buffer_offset = bp->parent_offset +
3017                         (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
3018         return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
3019 }
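
/*
 * Fixup sketch with invented offsets: if a child buffer object
 * declares parent_offset == 0x18 and its parent's payload lands at
 * target offset 0x100, the code above queues a fixup at buffer offset
 * 0x118 whose fixup_data is bp->buffer, i.e. the child's payload
 * address as already rewritten for the target's address space. The
 * sender's original pointer value therefore never reaches the target.
 */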
3020
3021 /**
3022  * binder_proc_transaction() - sends a transaction to a process and wakes it up
3023  * @t:          transaction to send
3024  * @proc:       process to send the transaction to
3025  * @thread:     thread in @proc to send the transaction to (may be NULL)
3026  *
3027  * This function queues a transaction to the specified process. It will try
3028  * to find a thread in the target process to handle the transaction and
3029  * wake it up. If no idle thread is available, the work is queued to the
3030  * process-wide todo list (proc->todo) instead.
3031  *
3032  * If the @thread parameter is not NULL, the transaction is always queued
3033  * to the todo list of that specific thread.
3034  *
3035  * Return:      true if the transaction was successfully queued
3036  *              false if the target process or thread is dead
3037  */
3038 static bool binder_proc_transaction(struct binder_transaction *t,
3039                                     struct binder_proc *proc,
3040                                     struct binder_thread *thread)
3041 {
3042         struct binder_node *node = t->buffer->target_node;
3043         bool oneway = !!(t->flags & TF_ONE_WAY);
3044         bool pending_async = false;
3045
3046         BUG_ON(!node);
3047         binder_node_lock(node);
3048         if (oneway) {
3049                 BUG_ON(thread);
3050                 if (node->has_async_transaction)
3051                         pending_async = true;
3052                 else
3053                         node->has_async_transaction = true;
3054         }
3055
3056         binder_inner_proc_lock(proc);
3057
3058         if (proc->is_dead || (thread && thread->is_dead)) {
3059                 binder_inner_proc_unlock(proc);
3060                 binder_node_unlock(node);
3061                 return false;
3062         }
3063
3064         if (!thread && !pending_async)
3065                 thread = binder_select_thread_ilocked(proc);
3066
3067         if (thread)
3068                 binder_enqueue_thread_work_ilocked(thread, &t->work);
3069         else if (!pending_async)
3070                 binder_enqueue_work_ilocked(&t->work, &proc->todo);
3071         else
3072                 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
3073
3074         if (!pending_async)
3075                 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
3076
3077         binder_inner_proc_unlock(proc);
3078         binder_node_unlock(node);
3079
3080         return true;
3081 }
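
/*
 * Informal decision table for the queueing above (a restatement, not
 * new behaviour):
 *
 *	@thread != NULL                  -> thread->todo
 *	sync, idle waiting thread found  -> that thread's todo
 *	sync, no idle thread             -> proc->todo
 *	oneway, node has no async work   -> idle thread or proc->todo
 *	oneway, async work outstanding   -> node->async_todo, no wakeup
 */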
3082
3083 /**
3084  * binder_get_node_refs_for_txn() - Get required refs on node for txn
3085  * @node:         struct binder_node for which to get refs
3086  * @procp:        returns @node->proc if valid
3087  * @error:        set to BR_DEAD_REPLY if @node->proc is NULL
3088  *
3089  * User-space normally keeps the node alive when creating a transaction
3090  * since it has a reference to the target. The local strong ref keeps it
3091  * alive if the sending process dies before the target process processes
3092  * the transaction. If the source process is malicious or has a reference
3093  * counting bug, relying on the local strong ref can fail.
3094  *
3095  * Since user-space can cause the local strong ref to go away, we also take
3096  * a tmpref on the node to ensure it survives while we are constructing
3097  * the transaction. We also need a tmpref on the proc while we are
3098  * constructing the transaction, so we take that here as well.
3099  *
3100  * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
3101  * Also sets @procp if valid. If @node->proc is NULL, indicating that the
3102  * target proc has died, @error is set to BR_DEAD_REPLY.
3103  */
3104 static struct binder_node *binder_get_node_refs_for_txn(
3105                 struct binder_node *node,
3106                 struct binder_proc **procp,
3107                 uint32_t *error)
3108 {
3109         struct binder_node *target_node = NULL;
3110
3111         binder_node_inner_lock(node);
3112         if (node->proc) {
3113                 target_node = node;
3114                 binder_inc_node_nilocked(node, 1, 0, NULL);
3115                 binder_inc_node_tmpref_ilocked(node);
3116                 node->proc->tmp_ref++;
3117                 *procp = node->proc;
3118         } else
3119                 *error = BR_DEAD_REPLY;
3120         binder_node_inner_unlock(node);
3121
3122         return target_node;
3123 }
3124
3125 static void binder_transaction(struct binder_proc *proc,
3126                                struct binder_thread *thread,
3127                                struct binder_transaction_data *tr, int reply,
3128                                binder_size_t extra_buffers_size)
3129 {
3130         int ret;
3131         struct binder_transaction *t;
3132         struct binder_work *w;
3133         struct binder_work *tcomplete;
3134         binder_size_t buffer_offset = 0;
3135         binder_size_t off_start_offset, off_end_offset;
3136         binder_size_t off_min;
3137         binder_size_t sg_buf_offset, sg_buf_end_offset;
3138         binder_size_t user_offset = 0;
3139         struct binder_proc *target_proc = NULL;
3140         struct binder_thread *target_thread = NULL;
3141         struct binder_node *target_node = NULL;
3142         struct binder_transaction *in_reply_to = NULL;
3143         struct binder_transaction_log_entry *e;
3144         uint32_t return_error = 0;
3145         uint32_t return_error_param = 0;
3146         uint32_t return_error_line = 0;
3147         binder_size_t last_fixup_obj_off = 0;
3148         binder_size_t last_fixup_min_off = 0;
3149         struct binder_context *context = proc->context;
3150         int t_debug_id = atomic_inc_return(&binder_last_id);
3151         char *secctx = NULL;
3152         u32 secctx_sz = 0;
3153         struct list_head sgc_head;
3154         struct list_head pf_head;
3155         const void __user *user_buffer = (const void __user *)
3156                                 (uintptr_t)tr->data.ptr.buffer;
3157         INIT_LIST_HEAD(&sgc_head);
3158         INIT_LIST_HEAD(&pf_head);
3159
3160         e = binder_transaction_log_add(&binder_transaction_log);
3161         e->debug_id = t_debug_id;
3162         e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
3163         e->from_proc = proc->pid;
3164         e->from_thread = thread->pid;
3165         e->target_handle = tr->target.handle;
3166         e->data_size = tr->data_size;
3167         e->offsets_size = tr->offsets_size;
3168         strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
3169
3170         if (reply) {
3171                 binder_inner_proc_lock(proc);
3172                 in_reply_to = thread->transaction_stack;
3173                 if (in_reply_to == NULL) {
3174                         binder_inner_proc_unlock(proc);
3175                         binder_user_error("%d:%d got reply transaction with no transaction stack\n",
3176                                           proc->pid, thread->pid);
3177                         return_error = BR_FAILED_REPLY;
3178                         return_error_param = -EPROTO;
3179                         return_error_line = __LINE__;
3180                         goto err_empty_call_stack;
3181                 }
3182                 if (in_reply_to->to_thread != thread) {
3183                         spin_lock(&in_reply_to->lock);
3184                         binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
3185                                 proc->pid, thread->pid, in_reply_to->debug_id,
3186                                 in_reply_to->to_proc ?
3187                                 in_reply_to->to_proc->pid : 0,
3188                                 in_reply_to->to_thread ?
3189                                 in_reply_to->to_thread->pid : 0);
3190                         spin_unlock(&in_reply_to->lock);
3191                         binder_inner_proc_unlock(proc);
3192                         return_error = BR_FAILED_REPLY;
3193                         return_error_param = -EPROTO;
3194                         return_error_line = __LINE__;
3195                         in_reply_to = NULL;
3196                         goto err_bad_call_stack;
3197                 }
3198                 thread->transaction_stack = in_reply_to->to_parent;
3199                 binder_inner_proc_unlock(proc);
3200                 binder_set_nice(in_reply_to->saved_priority);
3201                 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
3202                 if (target_thread == NULL) {
3203                         /* annotation for sparse */
3204                         __release(&target_thread->proc->inner_lock);
3205                         return_error = BR_DEAD_REPLY;
3206                         return_error_line = __LINE__;
3207                         goto err_dead_binder;
3208                 }
3209                 if (target_thread->transaction_stack != in_reply_to) {
3210                         binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
3211                                 proc->pid, thread->pid,
3212                                 target_thread->transaction_stack ?
3213                                 target_thread->transaction_stack->debug_id : 0,
3214                                 in_reply_to->debug_id);
3215                         binder_inner_proc_unlock(target_thread->proc);
3216                         return_error = BR_FAILED_REPLY;
3217                         return_error_param = -EPROTO;
3218                         return_error_line = __LINE__;
3219                         in_reply_to = NULL;
3220                         target_thread = NULL;
3221                         goto err_dead_binder;
3222                 }
3223                 target_proc = target_thread->proc;
3224                 target_proc->tmp_ref++;
3225                 binder_inner_proc_unlock(target_thread->proc);
3226         } else {
3227                 if (tr->target.handle) {
3228                         struct binder_ref *ref;
3229
3230                         /*
3231                          * Holding a handle implies there is already a
3232                          * strong ref on this node. If the ref lookup
3233                          * succeeds, take another strong increment so
3234                          * the node stays alive until the transaction
3235                          * is done.
3236                          */
3237                         binder_proc_lock(proc);
3238                         ref = binder_get_ref_olocked(proc, tr->target.handle,
3239                                                      true);
3240                         if (ref) {
3241                                 target_node = binder_get_node_refs_for_txn(
3242                                                 ref->node, &target_proc,
3243                                                 &return_error);
3244                         } else {
3245                                 binder_user_error("%d:%d got transaction to invalid handle\n",
3246                                                   proc->pid, thread->pid);
3247                                 return_error = BR_FAILED_REPLY;
3248                         }
3249                         binder_proc_unlock(proc);
3250                 } else {
3251                         mutex_lock(&context->context_mgr_node_lock);
3252                         target_node = context->binder_context_mgr_node;
3253                         if (target_node)
3254                                 target_node = binder_get_node_refs_for_txn(
3255                                                 target_node, &target_proc,
3256                                                 &return_error);
3257                         else
3258                                 return_error = BR_DEAD_REPLY;
3259                         mutex_unlock(&context->context_mgr_node_lock);
3260                         if (target_node && target_proc->pid == proc->pid) {
3261                                 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3262                                                   proc->pid, thread->pid);
3263                                 return_error = BR_FAILED_REPLY;
3264                                 return_error_param = -EINVAL;
3265                                 return_error_line = __LINE__;
3266                                 goto err_invalid_target_handle;
3267                         }
3268                 }
3269                 if (!target_node) {
3270                         /*
3271                          * return_error is set above
3272                          */
3273                         return_error_param = -EINVAL;
3274                         return_error_line = __LINE__;
3275                         goto err_dead_binder;
3276                 }
3277                 e->to_node = target_node->debug_id;
3278                 if (WARN_ON(proc == target_proc)) {
3279                         return_error = BR_FAILED_REPLY;
3280                         return_error_param = -EINVAL;
3281                         return_error_line = __LINE__;
3282                         goto err_invalid_target_handle;
3283                 }
3284                 if (security_binder_transaction(proc->cred,
3285                                                 target_proc->cred) < 0) {
3286                         return_error = BR_FAILED_REPLY;
3287                         return_error_param = -EPERM;
3288                         return_error_line = __LINE__;
3289                         goto err_invalid_target_handle;
3290                 }
3291                 binder_inner_proc_lock(proc);
3292
3293                 w = list_first_entry_or_null(&thread->todo,
3294                                              struct binder_work, entry);
3295                 if (!(tr->flags & TF_ONE_WAY) && w &&
3296                     w->type == BINDER_WORK_TRANSACTION) {
3297                         /*
3298                          * Do not allow new outgoing transaction from a
3299                          * thread that has a transaction at the head of
3300                          * its todo list. Only need to check the head
3301                          * because binder_select_thread_ilocked picks a
3302                          * thread from proc->waiting_threads to enqueue
3303                          * the transaction, and nothing is queued to the
3304                          * todo list while the thread is on waiting_threads.
3305                          */
3306                         binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3307                                           proc->pid, thread->pid);
3308                         binder_inner_proc_unlock(proc);
3309                         return_error = BR_FAILED_REPLY;
3310                         return_error_param = -EPROTO;
3311                         return_error_line = __LINE__;
3312                         goto err_bad_todo_list;
3313                 }
3314
3315                 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3316                         struct binder_transaction *tmp;
3317
3318                         tmp = thread->transaction_stack;
3319                         if (tmp->to_thread != thread) {
3320                                 spin_lock(&tmp->lock);
3321                                 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3322                                         proc->pid, thread->pid, tmp->debug_id,
3323                                         tmp->to_proc ? tmp->to_proc->pid : 0,
3324                                         tmp->to_thread ?
3325                                         tmp->to_thread->pid : 0);
3326                                 spin_unlock(&tmp->lock);
3327                                 binder_inner_proc_unlock(proc);
3328                                 return_error = BR_FAILED_REPLY;
3329                                 return_error_param = -EPROTO;
3330                                 return_error_line = __LINE__;
3331                                 goto err_bad_call_stack;
3332                         }
3333                         while (tmp) {
3334                                 struct binder_thread *from;
3335
3336                                 spin_lock(&tmp->lock);
3337                                 from = tmp->from;
3338                                 if (from && from->proc == target_proc) {
3339                                         atomic_inc(&from->tmp_ref);
3340                                         target_thread = from;
3341                                         spin_unlock(&tmp->lock);
3342                                         break;
3343                                 }
3344                                 spin_unlock(&tmp->lock);
3345                                 tmp = tmp->from_parent;
3346                         }
3347                 }
3348                 binder_inner_proc_unlock(proc);
3349         }
3350         if (target_thread)
3351                 e->to_thread = target_thread->pid;
3352         e->to_proc = target_proc->pid;
3353
3354         /* TODO: reuse incoming transaction for reply */
3355         t = kzalloc(sizeof(*t), GFP_KERNEL);
3356         if (t == NULL) {
3357                 return_error = BR_FAILED_REPLY;
3358                 return_error_param = -ENOMEM;
3359                 return_error_line = __LINE__;
3360                 goto err_alloc_t_failed;
3361         }
3362         INIT_LIST_HEAD(&t->fd_fixups);
3363         binder_stats_created(BINDER_STAT_TRANSACTION);
3364         spin_lock_init(&t->lock);
3365
3366         tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3367         if (tcomplete == NULL) {
3368                 return_error = BR_FAILED_REPLY;
3369                 return_error_param = -ENOMEM;
3370                 return_error_line = __LINE__;
3371                 goto err_alloc_tcomplete_failed;
3372         }
3373         binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3374
3375         t->debug_id = t_debug_id;
3376
3377         if (reply)
3378                 binder_debug(BINDER_DEBUG_TRANSACTION,
3379                              "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3380                              proc->pid, thread->pid, t->debug_id,
3381                              target_proc->pid, target_thread->pid,
3382                              (u64)tr->data.ptr.buffer,
3383                              (u64)tr->data.ptr.offsets,
3384                              (u64)tr->data_size, (u64)tr->offsets_size,
3385                              (u64)extra_buffers_size);
3386         else
3387                 binder_debug(BINDER_DEBUG_TRANSACTION,
3388                              "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3389                              proc->pid, thread->pid, t->debug_id,
3390                              target_proc->pid, target_node->debug_id,
3391                              (u64)tr->data.ptr.buffer,
3392                              (u64)tr->data.ptr.offsets,
3393                              (u64)tr->data_size, (u64)tr->offsets_size,
3394                              (u64)extra_buffers_size);
3395
3396         if (!reply && !(tr->flags & TF_ONE_WAY))
3397                 t->from = thread;
3398         else
3399                 t->from = NULL;
3400         t->sender_euid = task_euid(proc->tsk);
3401         t->to_proc = target_proc;
3402         t->to_thread = target_thread;
3403         t->code = tr->code;
3404         t->flags = tr->flags;
3405         t->priority = task_nice(current);
3406
3407         if (target_node && target_node->txn_security_ctx) {
3408                 u32 secid;
3409                 size_t added_size;
3410
3411                 security_cred_getsecid(proc->cred, &secid);
3412                 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3413                 if (ret) {
3414                         return_error = BR_FAILED_REPLY;
3415                         return_error_param = ret;
3416                         return_error_line = __LINE__;
3417                         goto err_get_secctx_failed;
3418                 }
3419                 added_size = ALIGN(secctx_sz, sizeof(u64));
3420                 extra_buffers_size += added_size;
3421                 if (extra_buffers_size < added_size) {
3422                         /* integer overflow of extra_buffers_size */
3423                         return_error = BR_FAILED_REPLY;
3424                         return_error_param = -EINVAL;
3425                         return_error_line = __LINE__;
3426                         goto err_bad_extra_size;
3427                 }
3428         }
3429
3430         trace_binder_transaction(reply, t, target_node);
3431
3432         t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3433                 tr->offsets_size, extra_buffers_size,
3434                 !reply && (t->flags & TF_ONE_WAY), current->tgid);
3435         if (IS_ERR(t->buffer)) {
3436                 /*
3437                  * -ESRCH indicates VMA cleared. The target is dying.
3438                  */
3439                 return_error_param = PTR_ERR(t->buffer);
3440                 return_error = return_error_param == -ESRCH ?
3441                         BR_DEAD_REPLY : BR_FAILED_REPLY;
3442                 return_error_line = __LINE__;
3443                 t->buffer = NULL;
3444                 goto err_binder_alloc_buf_failed;
3445         }
3446         if (secctx) {
3447                 int err;
3448                 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3449                                     ALIGN(tr->offsets_size, sizeof(void *)) +
3450                                     ALIGN(extra_buffers_size, sizeof(void *)) -
3451                                     ALIGN(secctx_sz, sizeof(u64));
3452
3453                 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3454                 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3455                                                   t->buffer, buf_offset,
3456                                                   secctx, secctx_sz);
3457                 if (err) {
3458                         t->security_ctx = 0;
3459                         WARN_ON(1);
3460                 }
3461                 security_release_secctx(secctx, secctx_sz);
3462                 secctx = NULL;
3463         }
3464         t->buffer->debug_id = t->debug_id;
3465         t->buffer->transaction = t;
3466         t->buffer->target_node = target_node;
3467         t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3468         trace_binder_transaction_alloc_buf(t->buffer);
3469
3470         if (binder_alloc_copy_user_to_buffer(
3471                                 &target_proc->alloc,
3472                                 t->buffer,
3473                                 ALIGN(tr->data_size, sizeof(void *)),
3474                                 (const void __user *)
3475                                         (uintptr_t)tr->data.ptr.offsets,
3476                                 tr->offsets_size)) {
3477                 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3478                                 proc->pid, thread->pid);
3479                 return_error = BR_FAILED_REPLY;
3480                 return_error_param = -EFAULT;
3481                 return_error_line = __LINE__;
3482                 goto err_copy_data_failed;
3483         }
3484         if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3485                 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3486                                 proc->pid, thread->pid, (u64)tr->offsets_size);
3487                 return_error = BR_FAILED_REPLY;
3488                 return_error_param = -EINVAL;
3489                 return_error_line = __LINE__;
3490                 goto err_bad_offset;
3491         }
3492         if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3493                 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3494                                   proc->pid, thread->pid,
3495                                   (u64)extra_buffers_size);
3496                 return_error = BR_FAILED_REPLY;
3497                 return_error_param = -EINVAL;
3498                 return_error_line = __LINE__;
3499                 goto err_bad_offset;
3500         }
3501         off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3502         buffer_offset = off_start_offset;
3503         off_end_offset = off_start_offset + tr->offsets_size;
3504         sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3505         sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3506                 ALIGN(secctx_sz, sizeof(u64));
3507         off_min = 0;
3508         for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3509              buffer_offset += sizeof(binder_size_t)) {
3510                 struct binder_object_header *hdr;
3511                 size_t object_size;
3512                 struct binder_object object;
3513                 binder_size_t object_offset;
3514                 binder_size_t copy_size;
3515
3516                 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3517                                                   &object_offset,
3518                                                   t->buffer,
3519                                                   buffer_offset,
3520                                                   sizeof(object_offset))) {
3521                         return_error = BR_FAILED_REPLY;
3522                         return_error_param = -EINVAL;
3523                         return_error_line = __LINE__;
3524                         goto err_bad_offset;
3525                 }
3526
3527                 /*
3528                  * Copy the source user buffer up to the next object
3529                  * that will be processed.
3530                  */
3531                 copy_size = object_offset - user_offset;
3532                 if (copy_size && (user_offset > object_offset ||
3533                                 binder_alloc_copy_user_to_buffer(
3534                                         &target_proc->alloc,
3535                                         t->buffer, user_offset,
3536                                         user_buffer + user_offset,
3537                                         copy_size))) {
3538                         binder_user_error("%d:%d got transaction with invalid data ptr\n",
3539                                         proc->pid, thread->pid);
3540                         return_error = BR_FAILED_REPLY;
3541                         return_error_param = -EFAULT;
3542                         return_error_line = __LINE__;
3543                         goto err_copy_data_failed;
3544                 }
3545                 object_size = binder_get_object(target_proc, user_buffer,
3546                                 t->buffer, object_offset, &object);
3547                 if (object_size == 0 || object_offset < off_min) {
3548                         binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3549                                           proc->pid, thread->pid,
3550                                           (u64)object_offset,
3551                                           (u64)off_min,
3552                                           (u64)t->buffer->data_size);
3553                         return_error = BR_FAILED_REPLY;
3554                         return_error_param = -EINVAL;
3555                         return_error_line = __LINE__;
3556                         goto err_bad_offset;
3557                 }
3558                 /*
3559                  * Set offset to the next buffer fragment to be
3560                  * copied
3561                  */
3562                 user_offset = object_offset + object_size;
3563
3564                 hdr = &object.hdr;
3565                 off_min = object_offset + object_size;
3566                 switch (hdr->type) {
3567                 case BINDER_TYPE_BINDER:
3568                 case BINDER_TYPE_WEAK_BINDER: {
3569                         struct flat_binder_object *fp;
3570
3571                         fp = to_flat_binder_object(hdr);
3572                         ret = binder_translate_binder(fp, t, thread);
3573
3574                         if (ret < 0 ||
3575                             binder_alloc_copy_to_buffer(&target_proc->alloc,
3576                                                         t->buffer,
3577                                                         object_offset,
3578                                                         fp, sizeof(*fp))) {
3579                                 return_error = BR_FAILED_REPLY;
3580                                 return_error_param = ret;
3581                                 return_error_line = __LINE__;
3582                                 goto err_translate_failed;
3583                         }
3584                 } break;
3585                 case BINDER_TYPE_HANDLE:
3586                 case BINDER_TYPE_WEAK_HANDLE: {
3587                         struct flat_binder_object *fp;
3588
3589                         fp = to_flat_binder_object(hdr);
3590                         ret = binder_translate_handle(fp, t, thread);
3591                         if (ret < 0 ||
3592                             binder_alloc_copy_to_buffer(&target_proc->alloc,
3593                                                         t->buffer,
3594                                                         object_offset,
3595                                                         fp, sizeof(*fp))) {
3596                                 return_error = BR_FAILED_REPLY;
3597                                 return_error_param = ret;
3598                                 return_error_line = __LINE__;
3599                                 goto err_translate_failed;
3600                         }
3601                 } break;
3602
3603                 case BINDER_TYPE_FD: {
3604                         struct binder_fd_object *fp = to_binder_fd_object(hdr);
3605                         binder_size_t fd_offset = object_offset +
3606                                 (uintptr_t)&fp->fd - (uintptr_t)fp;
3607                         int ret = binder_translate_fd(fp->fd, fd_offset, t,
3608                                                       thread, in_reply_to);
3609
3610                         fp->pad_binder = 0;
3611                         if (ret < 0 ||
3612                             binder_alloc_copy_to_buffer(&target_proc->alloc,
3613                                                         t->buffer,
3614                                                         object_offset,
3615                                                         fp, sizeof(*fp))) {
3616                                 return_error = BR_FAILED_REPLY;
3617                                 return_error_param = ret;
3618                                 return_error_line = __LINE__;
3619                                 goto err_translate_failed;
3620                         }
3621                 } break;
3622                 case BINDER_TYPE_FDA: {
3623                         struct binder_object ptr_object;
3624                         binder_size_t parent_offset;
3625                         struct binder_object user_object;
3626                         size_t user_parent_size;
3627                         struct binder_fd_array_object *fda =
3628                                 to_binder_fd_array_object(hdr);
3629                         size_t num_valid = (buffer_offset - off_start_offset) /
3630                                                 sizeof(binder_size_t);
3631                         struct binder_buffer_object *parent =
3632                                 binder_validate_ptr(target_proc, t->buffer,
3633                                                     &ptr_object, fda->parent,
3634                                                     off_start_offset,
3635                                                     &parent_offset,
3636                                                     num_valid);
3637                         if (!parent) {
3638                                 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3639                                                   proc->pid, thread->pid);
3640                                 return_error = BR_FAILED_REPLY;
3641                                 return_error_param = -EINVAL;
3642                                 return_error_line = __LINE__;
3643                                 goto err_bad_parent;
3644                         }
3645                         if (!binder_validate_fixup(target_proc, t->buffer,
3646                                                    off_start_offset,
3647                                                    parent_offset,
3648                                                    fda->parent_offset,
3649                                                    last_fixup_obj_off,
3650                                                    last_fixup_min_off)) {
3651                                 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3652                                                   proc->pid, thread->pid);
3653                                 return_error = BR_FAILED_REPLY;
3654                                 return_error_param = -EINVAL;
3655                                 return_error_line = __LINE__;
3656                                 goto err_bad_parent;
3657                         }
3658                         /*
3659                          * We need to read the user version of the parent
3660                          * object to get the original user offset
3661                          */
3662                         user_parent_size =
3663                                 binder_get_object(proc, user_buffer, t->buffer,
3664                                                   parent_offset, &user_object);
3665                         if (user_parent_size != sizeof(user_object.bbo)) {
3666                                 binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3667                                                   proc->pid, thread->pid,
3668                                                   user_parent_size,
3669                                                   sizeof(user_object.bbo));
3670                                 return_error = BR_FAILED_REPLY;
3671                                 return_error_param = -EINVAL;
3672                                 return_error_line = __LINE__;
3673                                 goto err_bad_parent;
3674                         }
3675                         ret = binder_translate_fd_array(&pf_head, fda,
3676                                                         user_buffer, parent,
3677                                                         &user_object.bbo, t,
3678                                                         thread, in_reply_to);
3679                         if (!ret)
3680                                 ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3681                                                                   t->buffer,
3682                                                                   object_offset,
3683                                                                   fda, sizeof(*fda));
3684                         if (ret) {
3685                                 return_error = BR_FAILED_REPLY;
3686                                 return_error_param = ret > 0 ? -EINVAL : ret;
3687                                 return_error_line = __LINE__;
3688                                 goto err_translate_failed;
3689                         }
3690                         last_fixup_obj_off = parent_offset;
3691                         last_fixup_min_off =
3692                                 fda->parent_offset + sizeof(u32) * fda->num_fds;
3693                 } break;
3694                 case BINDER_TYPE_PTR: {
3695                         struct binder_buffer_object *bp =
3696                                 to_binder_buffer_object(hdr);
3697                         size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3698                         size_t num_valid;
3699
3700                         if (bp->length > buf_left) {
3701                                 binder_user_error("%d:%d got transaction with too large buffer\n",
3702                                                   proc->pid, thread->pid);
3703                                 return_error = BR_FAILED_REPLY;
3704                                 return_error_param = -EINVAL;
3705                                 return_error_line = __LINE__;
3706                                 goto err_bad_offset;
3707                         }
3708                         ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3709                                 (const void __user *)(uintptr_t)bp->buffer,
3710                                 bp->length);
3711                         if (ret) {
3712                                 return_error = BR_FAILED_REPLY;
3713                                 return_error_param = ret;
3714                                 return_error_line = __LINE__;
3715                                 goto err_translate_failed;
3716                         }
3717                         /* Fixup buffer pointer to target proc address space */
3718                         bp->buffer = (uintptr_t)
3719                                 t->buffer->user_data + sg_buf_offset;
3720                         sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3721
3722                         num_valid = (buffer_offset - off_start_offset) /
3723                                         sizeof(binder_size_t);
3724                         ret = binder_fixup_parent(&pf_head, t,
3725                                                   thread, bp,
3726                                                   off_start_offset,
3727                                                   num_valid,
3728                                                   last_fixup_obj_off,
3729                                                   last_fixup_min_off);
3730                         if (ret < 0 ||
3731                             binder_alloc_copy_to_buffer(&target_proc->alloc,
3732                                                         t->buffer,
3733                                                         object_offset,
3734                                                         bp, sizeof(*bp))) {
3735                                 return_error = BR_FAILED_REPLY;
3736                                 return_error_param = ret;
3737                                 return_error_line = __LINE__;
3738                                 goto err_translate_failed;
3739                         }
3740                         last_fixup_obj_off = object_offset;
3741                         last_fixup_min_off = 0;
3742                 } break;
3743                 default:
3744                         binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3745                                 proc->pid, thread->pid, hdr->type);
3746                         return_error = BR_FAILED_REPLY;
3747                         return_error_param = -EINVAL;
3748                         return_error_line = __LINE__;
3749                         goto err_bad_object_type;
3750                 }
3751         }
3752         /* Done processing objects, copy the rest of the buffer */
3753         if (binder_alloc_copy_user_to_buffer(
3754                                 &target_proc->alloc,
3755                                 t->buffer, user_offset,
3756                                 user_buffer + user_offset,
3757                                 tr->data_size - user_offset)) {
3758                 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3759                                 proc->pid, thread->pid);
3760                 return_error = BR_FAILED_REPLY;
3761                 return_error_param = -EFAULT;
3762                 return_error_line = __LINE__;
3763                 goto err_copy_data_failed;
3764         }
3765
3766         ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3767                                             &sgc_head, &pf_head);
3768         if (ret) {
3769                 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3770                                   proc->pid, thread->pid);
3771                 return_error = BR_FAILED_REPLY;
3772                 return_error_param = ret;
3773                 return_error_line = __LINE__;
3774                 goto err_copy_data_failed;
3775         }
3776         tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3777         t->work.type = BINDER_WORK_TRANSACTION;
3778
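        /*
         * Deliver the transaction: a reply goes straight to the waiting
         * target thread; a synchronous transaction is first pushed onto
         * this thread's transaction stack; a one-way transaction is handed
         * to the target process with no stack entry.
         */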
3779         if (reply) {
3780                 binder_enqueue_thread_work(thread, tcomplete);
3781                 binder_inner_proc_lock(target_proc);
3782                 if (target_thread->is_dead) {
3783                         binder_inner_proc_unlock(target_proc);
3784                         goto err_dead_proc_or_thread;
3785                 }
3786                 BUG_ON(t->buffer->async_transaction != 0);
3787                 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3788                 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3789                 binder_inner_proc_unlock(target_proc);
3790                 wake_up_interruptible_sync(&target_thread->wait);
3791                 binder_free_transaction(in_reply_to);
3792         } else if (!(t->flags & TF_ONE_WAY)) {
3793                 BUG_ON(t->buffer->async_transaction != 0);
3794                 binder_inner_proc_lock(proc);
3795                 /*
3796                  * Defer the TRANSACTION_COMPLETE so we don't return to
3797                  * userspace right away; this lets the target process start
3798                  * processing this transaction immediately, reducing latency.
3799                  * We will then return the TRANSACTION_COMPLETE when the
3800                  * target replies (or there is an error).
3801                  */
3802                 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3803                 t->need_reply = 1;
3804                 t->from_parent = thread->transaction_stack;
3805                 thread->transaction_stack = t;
3806                 binder_inner_proc_unlock(proc);
3807                 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3808                         binder_inner_proc_lock(proc);
3809                         binder_pop_transaction_ilocked(thread, t);
3810                         binder_inner_proc_unlock(proc);
3811                         goto err_dead_proc_or_thread;
3812                 }
3813         } else {
3814                 BUG_ON(target_node == NULL);
3815                 BUG_ON(t->buffer->async_transaction != 1);
3816                 binder_enqueue_thread_work(thread, tcomplete);
3817                 if (!binder_proc_transaction(t, target_proc, NULL))
3818                         goto err_dead_proc_or_thread;
3819         }
3820         if (target_thread)
3821                 binder_thread_dec_tmpref(target_thread);
3822         binder_proc_dec_tmpref(target_proc);
3823         if (target_node)
3824                 binder_dec_node_tmpref(target_node);
3825         /*
3826          * write barrier to synchronize with initialization
3827          * of log entry
3828          */
3829         smp_wmb();
3830         WRITE_ONCE(e->debug_id_done, t_debug_id);
3831         return;
3832
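        /*
         * Error paths: each label below corresponds to a failure point and
         * falls through so that everything acquired up to that point
         * (buffer, security context, tcomplete, t, temporary references)
         * is released.
         */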
3833 err_dead_proc_or_thread:
3834         return_error = BR_DEAD_REPLY;
3835         return_error_line = __LINE__;
3836         binder_dequeue_work(proc, tcomplete);
3837 err_translate_failed:
3838 err_bad_object_type:
3839 err_bad_offset:
3840 err_bad_parent:
3841 err_copy_data_failed:
3842         binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3843         binder_free_txn_fixups(t);
3844         trace_binder_transaction_failed_buffer_release(t->buffer);
3845         binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3846                                           buffer_offset, true);
3847         if (target_node)
3848                 binder_dec_node_tmpref(target_node);
3849         target_node = NULL;
3850         t->buffer->transaction = NULL;
3851         binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3852 err_binder_alloc_buf_failed:
3853 err_bad_extra_size:
3854         if (secctx)
3855                 security_release_secctx(secctx, secctx_sz);
3856 err_get_secctx_failed:
3857         kfree(tcomplete);
3858         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3859 err_alloc_tcomplete_failed:
3860         kfree(t);
3861         binder_stats_deleted(BINDER_STAT_TRANSACTION);
3862 err_alloc_t_failed:
3863 err_bad_todo_list:
3864 err_bad_call_stack:
3865 err_empty_call_stack:
3866 err_dead_binder:
3867 err_invalid_target_handle:
3868         if (target_thread)
3869                 binder_thread_dec_tmpref(target_thread);
3870         if (target_proc)
3871                 binder_proc_dec_tmpref(target_proc);
3872         if (target_node) {
3873                 binder_dec_node(target_node, 1, 0);
3874                 binder_dec_node_tmpref(target_node);
3875         }
3876
3877         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3878                      "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3879                      proc->pid, thread->pid, return_error, return_error_param,
3880                      (u64)tr->data_size, (u64)tr->offsets_size,
3881                      return_error_line);
3882
3883         {
3884                 struct binder_transaction_log_entry *fe;
3885
3886                 e->return_error = return_error;
3887                 e->return_error_param = return_error_param;
3888                 e->return_error_line = return_error_line;
3889                 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3890                 *fe = *e;
3891                 /*
3892                  * write barrier to synchronize with initialization
3893                  * of log entry
3894                  */
3895                 smp_wmb();
3896                 WRITE_ONCE(e->debug_id_done, t_debug_id);
3897                 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3898         }
3899
3900         BUG_ON(thread->return_error.cmd != BR_OK);
3901         if (in_reply_to) {
3902                 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3903                 binder_enqueue_thread_work(thread, &thread->return_error.work);
3904                 binder_send_failed_reply(in_reply_to, return_error);
3905         } else {
3906                 thread->return_error.cmd = return_error;
3907                 binder_enqueue_thread_work(thread, &thread->return_error.work);
3908         }
3909 }
3910
3911 /**
3912  * binder_free_buf() - free the specified buffer
3913  * @proc:       binder proc that owns buffer
 * @thread:     thread performing the buffer release
3914  * @buffer:     buffer to be freed
3915  * @is_failure: true if the buffer is being freed after a failed transaction
3916  *
3917  * If the buffer is for an async transaction, enqueue the next async
3918  * transaction from the node.
3919  *
3920  * Clean up the buffer and free it.
3921  */
3922 static void
3923 binder_free_buf(struct binder_proc *proc,
3924                 struct binder_thread *thread,
3925                 struct binder_buffer *buffer, bool is_failure)
3926 {
3927         binder_inner_proc_lock(proc);
3928         if (buffer->transaction) {
3929                 buffer->transaction->buffer = NULL;
3930                 buffer->transaction = NULL;
3931         }
3932         binder_inner_proc_unlock(proc);
3933         if (buffer->async_transaction && buffer->target_node) {
3934                 struct binder_node *buf_node;
3935                 struct binder_work *w;
3936
3937                 buf_node = buffer->target_node;
3938                 binder_node_inner_lock(buf_node);
3939                 BUG_ON(!buf_node->has_async_transaction);
3940                 BUG_ON(buf_node->proc != proc);
3941                 w = binder_dequeue_work_head_ilocked(
3942                                 &buf_node->async_todo);
3943                 if (!w) {
3944                         buf_node->has_async_transaction = false;
3945                 } else {
3946                         binder_enqueue_work_ilocked(
3947                                         w, &proc->todo);
3948                         binder_wakeup_proc_ilocked(proc);
3949                 }
3950                 binder_node_inner_unlock(buf_node);
3951         }
3952         trace_binder_transaction_buffer_release(buffer);
3953         binder_release_entire_buffer(proc, thread, buffer, is_failure);
3954         binder_alloc_free_buf(&proc->alloc, buffer);
3955 }
3956
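/**
 * binder_thread_write() - handle the write half of a BINDER_WRITE_READ ioctl
 * @proc:          binder_proc issuing the commands
 * @thread:        binder_thread issuing the commands
 * @binder_buffer: userspace address of the command buffer
 * @size:          size of the command buffer in bytes
 * @consumed:      bytes already consumed; advanced as commands are processed
 *
 * Parse and execute BC_* commands (reference count changes, buffer frees,
 * transactions and replies, looper registration, death notification
 * requests) until the buffer is exhausted or the thread has a pending
 * return error to deliver.
 *
 * Return: 0 on success, -EFAULT or -EINVAL on malformed input.
 */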
3957 static int binder_thread_write(struct binder_proc *proc,
3958                         struct binder_thread *thread,
3959                         binder_uintptr_t binder_buffer, size_t size,
3960                         binder_size_t *consumed)
3961 {
3962         uint32_t cmd;
3963         struct binder_context *context = proc->context;
3964         void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3965         void __user *ptr = buffer + *consumed;
3966         void __user *end = buffer + size;
3967
3968         while (ptr < end && thread->return_error.cmd == BR_OK) {
3969                 int ret;
3970
3971                 if (get_user(cmd, (uint32_t __user *)ptr))
3972                         return -EFAULT;
3973                 ptr += sizeof(uint32_t);
3974                 trace_binder_command(cmd);
3975                 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3976                         atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3977                         atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3978                         atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3979                 }
3980                 switch (cmd) {
3981                 case BC_INCREFS:
3982                 case BC_ACQUIRE:
3983                 case BC_RELEASE:
3984                 case BC_DECREFS: {
3985                         uint32_t target;
3986                         const char *debug_string;
3987                         bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3988                         bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3989                         struct binder_ref_data rdata;
3990
3991                         if (get_user(target, (uint32_t __user *)ptr))
3992                                 return -EFAULT;
3993
3994                         ptr += sizeof(uint32_t);
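                        /*
                         * A target of 0 with an increment refers to the context
                         * manager; try to take the reference on its node first.
                         * ret stays nonzero if that special case does not apply,
                         * and the handle is then looked up normally below.
                         */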
3995                         ret = -1;
3996                         if (increment && !target) {
3997                                 struct binder_node *ctx_mgr_node;
3998                                 mutex_lock(&context->context_mgr_node_lock);
3999                                 ctx_mgr_node = context->binder_context_mgr_node;
4000                                 if (ctx_mgr_node) {
4001                                         if (ctx_mgr_node->proc == proc) {
4002                                                 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
4003                                                                   proc->pid, thread->pid);
4004                                                 mutex_unlock(&context->context_mgr_node_lock);
4005                                                 return -EINVAL;
4006                                         }
4007                                         ret = binder_inc_ref_for_node(
4008                                                         proc, ctx_mgr_node,
4009                                                         strong, NULL, &rdata);
4010                                 }
4011                                 mutex_unlock(&context->context_mgr_node_lock);
4012                         }
4013                         if (ret)
4014                                 ret = binder_update_ref_for_handle(
4015                                                 proc, target, increment, strong,
4016                                                 &rdata);
4017                         if (!ret && rdata.desc != target) {
4018                                 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
4019                                         proc->pid, thread->pid,
4020                                         target, rdata.desc);
4021                         }
4022                         switch (cmd) {
4023                         case BC_INCREFS:
4024                                 debug_string = "IncRefs";
4025                                 break;
4026                         case BC_ACQUIRE:
4027                                 debug_string = "Acquire";
4028                                 break;
4029                         case BC_RELEASE:
4030                                 debug_string = "Release";
4031                                 break;
4032                         case BC_DECREFS:
4033                         default:
4034                                 debug_string = "DecRefs";
4035                                 break;
4036                         }
4037                         if (ret) {
4038                                 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
4039                                         proc->pid, thread->pid, debug_string,
4040                                         strong, target, ret);
4041                                 break;
4042                         }
4043                         binder_debug(BINDER_DEBUG_USER_REFS,
4044                                      "%d:%d %s ref %d desc %d s %d w %d\n",
4045                                      proc->pid, thread->pid, debug_string,
4046                                      rdata.debug_id, rdata.desc, rdata.strong,
4047                                      rdata.weak);
4048                         break;
4049                 }
4050                 case BC_INCREFS_DONE:
4051                 case BC_ACQUIRE_DONE: {
4052                         binder_uintptr_t node_ptr;
4053                         binder_uintptr_t cookie;
4054                         struct binder_node *node;
4055                         bool free_node;
4056
4057                         if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
4058                                 return -EFAULT;
4059                         ptr += sizeof(binder_uintptr_t);
4060                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4061                                 return -EFAULT;
4062                         ptr += sizeof(binder_uintptr_t);
4063                         node = binder_get_node(proc, node_ptr);
4064                         if (node == NULL) {
4065                                 binder_user_error("%d:%d %s u%016llx no match\n",
4066                                         proc->pid, thread->pid,
4067                                         cmd == BC_INCREFS_DONE ?
4068                                         "BC_INCREFS_DONE" :
4069                                         "BC_ACQUIRE_DONE",
4070                                         (u64)node_ptr);
4071                                 break;
4072                         }
4073                         if (cookie != node->cookie) {
4074                                 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
4075                                         proc->pid, thread->pid,
4076                                         cmd == BC_INCREFS_DONE ?
4077                                         "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4078                                         (u64)node_ptr, node->debug_id,
4079                                         (u64)cookie, (u64)node->cookie);
4080                                 binder_put_node(node);
4081                                 break;
4082                         }
4083                         binder_node_inner_lock(node);
4084                         if (cmd == BC_ACQUIRE_DONE) {
4085                                 if (node->pending_strong_ref == 0) {
4086                                         binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
4087                                                 proc->pid, thread->pid,
4088                                                 node->debug_id);
4089                                         binder_node_inner_unlock(node);
4090                                         binder_put_node(node);
4091                                         break;
4092                                 }
4093                                 node->pending_strong_ref = 0;
4094                         } else {
4095                                 if (node->pending_weak_ref == 0) {
4096                                         binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
4097                                                 proc->pid, thread->pid,
4098                                                 node->debug_id);
4099                                         binder_node_inner_unlock(node);
4100                                         binder_put_node(node);
4101                                         break;
4102                                 }
4103                                 node->pending_weak_ref = 0;
4104                         }
4105                         free_node = binder_dec_node_nilocked(node,
4106                                         cmd == BC_ACQUIRE_DONE, 0);
4107                         WARN_ON(free_node);
4108                         binder_debug(BINDER_DEBUG_USER_REFS,
4109                                      "%d:%d %s node %d ls %d lw %d tr %d\n",
4110                                      proc->pid, thread->pid,
4111                                      cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4112                                      node->debug_id, node->local_strong_refs,
4113                                      node->local_weak_refs, node->tmp_refs);
4114                         binder_node_inner_unlock(node);
4115                         binder_put_node(node);
4116                         break;
4117                 }
4118                 case BC_ATTEMPT_ACQUIRE:
4119                         pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
4120                         return -EINVAL;
4121                 case BC_ACQUIRE_RESULT:
4122                         pr_err("BC_ACQUIRE_RESULT not supported\n");
4123                         return -EINVAL;
4124
4125                 case BC_FREE_BUFFER: {
4126                         binder_uintptr_t data_ptr;
4127                         struct binder_buffer *buffer;
4128
4129                         if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
4130                                 return -EFAULT;
4131                         ptr += sizeof(binder_uintptr_t);
4132
4133                         buffer = binder_alloc_prepare_to_free(&proc->alloc,
4134                                                               data_ptr);
4135                         if (IS_ERR_OR_NULL(buffer)) {
4136                                 if (PTR_ERR(buffer) == -EPERM) {
4137                                         binder_user_error(
4138                                                 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
4139                                                 proc->pid, thread->pid,
4140                                                 (u64)data_ptr);
4141                                 } else {
4142                                         binder_user_error(
4143                                                 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
4144                                                 proc->pid, thread->pid,
4145                                                 (u64)data_ptr);
4146                                 }
4147                                 break;
4148                         }
4149                         binder_debug(BINDER_DEBUG_FREE_BUFFER,
4150                                      "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
4151                                      proc->pid, thread->pid, (u64)data_ptr,
4152                                      buffer->debug_id,
4153                                      buffer->transaction ? "active" : "finished");
4154                         binder_free_buf(proc, thread, buffer, false);
4155                         break;
4156                 }
4157
4158                 case BC_TRANSACTION_SG:
4159                 case BC_REPLY_SG: {
4160                         struct binder_transaction_data_sg tr;
4161
4162                         if (copy_from_user(&tr, ptr, sizeof(tr)))
4163                                 return -EFAULT;
4164                         ptr += sizeof(tr);
4165                         binder_transaction(proc, thread, &tr.transaction_data,
4166                                            cmd == BC_REPLY_SG, tr.buffers_size);
4167                         break;
4168                 }
4169                 case BC_TRANSACTION:
4170                 case BC_REPLY: {
4171                         struct binder_transaction_data tr;
4172
4173                         if (copy_from_user(&tr, ptr, sizeof(tr)))
4174                                 return -EFAULT;
4175                         ptr += sizeof(tr);
4176                         binder_transaction(proc, thread, &tr,
4177                                            cmd == BC_REPLY, 0);
4178                         break;
4179                 }
4180
4181                 case BC_REGISTER_LOOPER:
4182                         binder_debug(BINDER_DEBUG_THREADS,
4183                                      "%d:%d BC_REGISTER_LOOPER\n",
4184                                      proc->pid, thread->pid);
4185                         binder_inner_proc_lock(proc);
4186                         if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4187                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4188                                 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4189                                         proc->pid, thread->pid);
4190                         } else if (proc->requested_threads == 0) {
4191                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4192                                 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4193                                         proc->pid, thread->pid);
4194                         } else {
4195                                 proc->requested_threads--;
4196                                 proc->requested_threads_started++;
4197                         }
4198                         thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4199                         binder_inner_proc_unlock(proc);
4200                         break;
4201                 case BC_ENTER_LOOPER:
4202                         binder_debug(BINDER_DEBUG_THREADS,
4203                                      "%d:%d BC_ENTER_LOOPER\n",
4204                                      proc->pid, thread->pid);
4205                         if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4206                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4207                                 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4208                                         proc->pid, thread->pid);
4209                         }
4210                         thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4211                         break;
4212                 case BC_EXIT_LOOPER:
4213                         binder_debug(BINDER_DEBUG_THREADS,
4214                                      "%d:%d BC_EXIT_LOOPER\n",
4215                                      proc->pid, thread->pid);
4216                         thread->looper |= BINDER_LOOPER_STATE_EXITED;
4217                         break;
4218
4219                 case BC_REQUEST_DEATH_NOTIFICATION:
4220                 case BC_CLEAR_DEATH_NOTIFICATION: {
4221                         uint32_t target;
4222                         binder_uintptr_t cookie;
4223                         struct binder_ref *ref;
4224                         struct binder_ref_death *death = NULL;
4225
4226                         if (get_user(target, (uint32_t __user *)ptr))
4227                                 return -EFAULT;
4228                         ptr += sizeof(uint32_t);
4229                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4230                                 return -EFAULT;
4231                         ptr += sizeof(binder_uintptr_t);
4232                         if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4233                                 /*
4234                                  * Allocate memory for death notification
4235                                  * before taking lock
4236                                  */
4237                                 death = kzalloc(sizeof(*death), GFP_KERNEL);
4238                                 if (death == NULL) {
4239                                         WARN_ON(thread->return_error.cmd !=
4240                                                 BR_OK);
4241                                         thread->return_error.cmd = BR_ERROR;
4242                                         binder_enqueue_thread_work(
4243                                                 thread,
4244                                                 &thread->return_error.work);
4245                                         binder_debug(
4246                                                 BINDER_DEBUG_FAILED_TRANSACTION,
4247                                                 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4248                                                 proc->pid, thread->pid);
4249                                         break;
4250                                 }
4251                         }
4252                         binder_proc_lock(proc);
4253                         ref = binder_get_ref_olocked(proc, target, false);
4254                         if (ref == NULL) {
4255                                 binder_user_error("%d:%d %s invalid ref %d\n",
4256                                         proc->pid, thread->pid,
4257                                         cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4258                                         "BC_REQUEST_DEATH_NOTIFICATION" :
4259                                         "BC_CLEAR_DEATH_NOTIFICATION",
4260                                         target);
4261                                 binder_proc_unlock(proc);
4262                                 kfree(death);
4263                                 break;
4264                         }
4265
4266                         binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4267                                      "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4268                                      proc->pid, thread->pid,
4269                                      cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4270                                      "BC_REQUEST_DEATH_NOTIFICATION" :
4271                                      "BC_CLEAR_DEATH_NOTIFICATION",
4272                                      (u64)cookie, ref->data.debug_id,
4273                                      ref->data.desc, ref->data.strong,
4274                                      ref->data.weak, ref->node->debug_id);
4275
4276                         binder_node_lock(ref->node);
4277                         if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4278                                 if (ref->death) {
4279                                         binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4280                                                 proc->pid, thread->pid);
4281                                         binder_node_unlock(ref->node);
4282                                         binder_proc_unlock(proc);
4283                                         kfree(death);
4284                                         break;
4285                                 }
4286                                 binder_stats_created(BINDER_STAT_DEATH);
4287                                 INIT_LIST_HEAD(&death->work.entry);
4288                                 death->cookie = cookie;
4289                                 ref->death = death;
4290                                 if (ref->node->proc == NULL) {
4291                                         ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4292
4293                                         binder_inner_proc_lock(proc);
4294                                         binder_enqueue_work_ilocked(
4295                                                 &ref->death->work, &proc->todo);
4296                                         binder_wakeup_proc_ilocked(proc);
4297                                         binder_inner_proc_unlock(proc);
4298                                 }
4299                         } else {
4300                                 if (ref->death == NULL) {
4301                                         binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4302                                                 proc->pid, thread->pid);
4303                                         binder_node_unlock(ref->node);
4304                                         binder_proc_unlock(proc);
4305                                         break;
4306                                 }
4307                                 death = ref->death;
4308                                 if (death->cookie != cookie) {
4309                                         binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4310                                                 proc->pid, thread->pid,
4311                                                 (u64)death->cookie,
4312                                                 (u64)cookie);
4313                                         binder_node_unlock(ref->node);
4314                                         binder_proc_unlock(proc);
4315                                         break;
4316                                 }
4317                                 ref->death = NULL;
4318                                 binder_inner_proc_lock(proc);
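                                /*
                                 * If no DEAD_BINDER work is queued or awaiting
                                 * acknowledgement for this ref, confirm the
                                 * clear right away; otherwise retag the
                                 * outstanding work so the clear is confirmed
                                 * after the death notification is handled.
                                 */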
4319                                 if (list_empty(&death->work.entry)) {
4320                                         death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4321                                         if (thread->looper &
4322                                             (BINDER_LOOPER_STATE_REGISTERED |
4323                                              BINDER_LOOPER_STATE_ENTERED))
4324                                                 binder_enqueue_thread_work_ilocked(
4325                                                                 thread,
4326                                                                 &death->work);
4327                                         else {
4328                                                 binder_enqueue_work_ilocked(
4329                                                                 &death->work,
4330                                                                 &proc->todo);
4331                                                 binder_wakeup_proc_ilocked(
4332                                                                 proc);
4333                                         }
4334                                 } else {
4335                                         BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4336                                         death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4337                                 }
4338                                 binder_inner_proc_unlock(proc);
4339                         }
4340                         binder_node_unlock(ref->node);
4341                         binder_proc_unlock(proc);
4342                 } break;
4343                 case BC_DEAD_BINDER_DONE: {
4344                         struct binder_work *w;
4345                         binder_uintptr_t cookie;
4346                         struct binder_ref_death *death = NULL;
4347
4348                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4349                                 return -EFAULT;
4350
4351                         ptr += sizeof(cookie);
4352                         binder_inner_proc_lock(proc);
4353                         list_for_each_entry(w, &proc->delivered_death,
4354                                             entry) {
4355                                 struct binder_ref_death *tmp_death =
4356                                         container_of(w,
4357                                                      struct binder_ref_death,
4358                                                      work);
4359
4360                                 if (tmp_death->cookie == cookie) {
4361                                         death = tmp_death;
4362                                         break;
4363                                 }
4364                         }
4365                         binder_debug(BINDER_DEBUG_DEAD_BINDER,
4366                                      "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4367                                      proc->pid, thread->pid, (u64)cookie,
4368                                      death);
4369                         if (death == NULL) {
4370                                 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4371                                         proc->pid, thread->pid, (u64)cookie);
4372                                 binder_inner_proc_unlock(proc);
4373                                 break;
4374                         }
4375                         binder_dequeue_work_ilocked(&death->work);
4376                         if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4377                                 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4378                                 if (thread->looper &
4379                                         (BINDER_LOOPER_STATE_REGISTERED |
4380                                          BINDER_LOOPER_STATE_ENTERED))
4381                                         binder_enqueue_thread_work_ilocked(
4382                                                 thread, &death->work);
4383                                 else {
4384                                         binder_enqueue_work_ilocked(
4385                                                         &death->work,
4386                                                         &proc->todo);
4387                                         binder_wakeup_proc_ilocked(proc);
4388                                 }
4389                         }
4390                         binder_inner_proc_unlock(proc);
4391                 } break;
4392
4393                 default:
4394                         pr_err("%d:%d unknown command %d\n",
4395                                proc->pid, thread->pid, cmd);
4396                         return -EINVAL;
4397                 }
4398                 *consumed = ptr - buffer;
4399         }
4400         return 0;
4401 }
4402
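/* Trace and count a BR_* return command in the global, per-proc and per-thread stats */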
4403 static void binder_stat_br(struct binder_proc *proc,
4404                            struct binder_thread *thread, uint32_t cmd)
4405 {
4406         trace_binder_return(cmd);
4407         if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4408                 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4409                 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4410                 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4411         }
4412 }
4413
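/**
 * binder_put_node_cmd() - write a node refcount command to the read buffer
 * @proc:          binder_proc receiving the command
 * @thread:        binder_thread receiving the command
 * @ptrp:          current position in the userspace read buffer, advanced
 *                 past the command on success
 * @node_ptr:      userspace address (ptr) of the node
 * @node_cookie:   userspace cookie of the node
 * @node_debug_id: debug id of the node, used only for logging
 * @cmd:           BR_INCREFS, BR_ACQUIRE, BR_RELEASE or BR_DECREFS
 * @cmd_name:      printable name of @cmd, used only for logging
 *
 * Copy @cmd followed by @node_ptr and @node_cookie to userspace and
 * update the return-command statistics.
 *
 * Return: 0 on success, -EFAULT if the copy to userspace fails.
 */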
4414 static int binder_put_node_cmd(struct binder_proc *proc,
4415                                struct binder_thread *thread,
4416                                void __user **ptrp,
4417                                binder_uintptr_t node_ptr,
4418                                binder_uintptr_t node_cookie,
4419                                int node_debug_id,
4420                                uint32_t cmd, const char *cmd_name)
4421 {
4422         void __user *ptr = *ptrp;
4423
4424         if (put_user(cmd, (uint32_t __user *)ptr))
4425                 return -EFAULT;
4426         ptr += sizeof(uint32_t);
4427
4428         if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4429                 return -EFAULT;
4430         ptr += sizeof(binder_uintptr_t);
4431
4432         if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4433                 return -EFAULT;
4434         ptr += sizeof(binder_uintptr_t);
4435
4436         binder_stat_br(proc, thread, cmd);
4437         binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4438                      proc->pid, thread->pid, cmd_name, node_debug_id,
4439                      (u64)node_ptr, (u64)node_cookie);
4440
4441         *ptrp = ptr;
4442         return 0;
4443 }
4444
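/*
 * Sleep until this thread has work to do (or, if @do_proc_work is true,
 * until the process has work this thread may handle).  While waiting for
 * process work the thread is kept on proc->waiting_threads so it can be
 * selected for wakeup.  Returns -ERESTARTSYS if interrupted by a signal,
 * 0 otherwise.
 */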
4445 static int binder_wait_for_work(struct binder_thread *thread,
4446                                 bool do_proc_work)
4447 {
4448         DEFINE_WAIT(wait);
4449         struct binder_proc *proc = thread->proc;
4450         int ret = 0;
4451
4452         freezer_do_not_count();
4453         binder_inner_proc_lock(proc);
4454         for (;;) {
4455                 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4456                 if (binder_has_work_ilocked(thread, do_proc_work))
4457                         break;
4458                 if (do_proc_work)
4459                         list_add(&thread->waiting_thread_node,
4460                                  &proc->waiting_threads);
4461                 binder_inner_proc_unlock(proc);
4462                 schedule();
4463                 binder_inner_proc_lock(proc);
4464                 list_del_init(&thread->waiting_thread_node);
4465                 if (signal_pending(current)) {
4466                         ret = -ERESTARTSYS;
4467                         break;
4468                 }
4469         }
4470         finish_wait(&thread->wait, &wait);
4471         binder_inner_proc_unlock(proc);
4472         freezer_count();
4473
4474         return ret;
4475 }
4476
4477 /**
4478  * binder_apply_fd_fixups() - finish fd translation
4479  * @proc:         binder_proc associated with @t->buffer
4480  * @t:  binder transaction with list of fd fixups
4481  *
4482  * Now that we are in the context of the transaction target
4483  * process, we can allocate and install fds. Process the
4484  * list of fds to translate and fixup the buffer with the
4485  * list of fds to translate and fix up the buffer with the
4486  *
4487  * If we fail to allocate an fd, then free the resources by
4488  * fput'ing files that have not been processed and ksys_close'ing
4489  * any fds that have already been allocated.
 *
 * Return: 0 on success; -ENOMEM if an fd could not be allocated,
 * -EINVAL if a new fd could not be copied into the buffer.
4490  */
4491 static int binder_apply_fd_fixups(struct binder_proc *proc,
4492                                   struct binder_transaction *t)
4493 {
4494         struct binder_txn_fd_fixup *fixup, *tmp;
4495         int ret = 0;
4496
4497         list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4498                 int fd = get_unused_fd_flags(O_CLOEXEC);
4499
4500                 if (fd < 0) {
4501                         binder_debug(BINDER_DEBUG_TRANSACTION,
4502                                      "failed fd fixup txn %d fd %d\n",
4503                                      t->debug_id, fd);
4504                         ret = -ENOMEM;
4505                         break;
4506                 }
4507                 binder_debug(BINDER_DEBUG_TRANSACTION,
4508                              "fd fixup txn %d fd %d\n",
4509                              t->debug_id, fd);
4510                 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4511                 fd_install(fd, fixup->file);
4512                 fixup->file = NULL;
4513                 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4514                                                 fixup->offset, &fd,
4515                                                 sizeof(u32))) {
4516                         ret = -EINVAL;
4517                         break;
4518                 }
4519         }
4520         list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4521                 if (fixup->file) {
4522                         fput(fixup->file);
4523                 } else if (ret) {
4524                         u32 fd;
4525                         int err;
4526
4527                         err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
4528                                                             t->buffer,
4529                                                             fixup->offset,
4530                                                             sizeof(fd));
4531                         WARN_ON(err);
4532                         if (!err)
4533                                 binder_deferred_fd_close(fd);
4534                 }
4535                 list_del(&fixup->fixup_entry);
4536                 kfree(fixup);
4537         }
4538
4539         return ret;
4540 }
4541
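/**
 * binder_thread_read() - handle the read half of a BINDER_WRITE_READ ioctl
 * @proc:          binder_proc reading work
 * @thread:        binder_thread reading work
 * @binder_buffer: userspace address of the read buffer
 * @size:          size of the read buffer in bytes
 * @consumed:      bytes already written; advanced as BR_* commands are added
 * @non_block:     if non-zero, return -EAGAIN rather than blocking when
 *                 there is no work available
 *
 * Drain work items from the thread's todo list (and from the process todo
 * list when this thread is allowed to handle process work), translating
 * each item into BR_* commands in the userspace read buffer.
 *
 * Return: 0 on success, or a negative errno on failure.
 */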
4542 static int binder_thread_read(struct binder_proc *proc,
4543                               struct binder_thread *thread,
4544                               binder_uintptr_t binder_buffer, size_t size,
4545                               binder_size_t *consumed, int non_block)
4546 {
4547         void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4548         void __user *ptr = buffer + *consumed;
4549         void __user *end = buffer + size;
4550
4551         int ret = 0;
4552         int wait_for_proc_work;
4553
4554         if (*consumed == 0) {
4555                 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4556                         return -EFAULT;
4557                 ptr += sizeof(uint32_t);
4558         }
4559
4560 retry:
4561         binder_inner_proc_lock(proc);
4562         wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4563         binder_inner_proc_unlock(proc);
4564
4565         thread->looper |= BINDER_LOOPER_STATE_WAITING;
4566
4567         trace_binder_wait_for_work(wait_for_proc_work,
4568                                    !!thread->transaction_stack,
4569                                    !binder_worklist_empty(proc, &thread->todo));
4570         if (wait_for_proc_work) {
4571                 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4572                                         BINDER_LOOPER_STATE_ENTERED))) {
4573                         binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4574                                 proc->pid, thread->pid, thread->looper);
4575                         wait_event_interruptible(binder_user_error_wait,
4576                                                  binder_stop_on_user_error < 2);
4577                 }
4578                 binder_set_nice(proc->default_priority);
4579         }
4580
4581         if (non_block) {
4582                 if (!binder_has_work(thread, wait_for_proc_work))
4583                         ret = -EAGAIN;
4584         } else {
4585                 ret = binder_wait_for_work(thread, wait_for_proc_work);
4586         }
4587
4588         thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4589
4590         if (ret)
4591                 return ret;
4592
4593         while (1) {
4594                 uint32_t cmd;
4595                 struct binder_transaction_data_secctx tr;
4596                 struct binder_transaction_data *trd = &tr.transaction_data;
4597                 struct binder_work *w = NULL;
4598                 struct list_head *list = NULL;
4599                 struct binder_transaction *t = NULL;
4600                 struct binder_thread *t_from;
4601                 size_t trsize = sizeof(*trd);
4602
4603                 binder_inner_proc_lock(proc);
4604                 if (!binder_worklist_empty_ilocked(&thread->todo))
4605                         list = &thread->todo;
4606                 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4607                            wait_for_proc_work)
4608                         list = &proc->todo;
4609                 else {
4610                         binder_inner_proc_unlock(proc);
4611
4612                         /* no data added */
4613                         if (ptr - buffer == 4 && !thread->looper_need_return)
4614                                 goto retry;
4615                         break;
4616                 }
4617
4618                 if (end - ptr < sizeof(tr) + 4) {
4619                         binder_inner_proc_unlock(proc);
4620                         break;
4621                 }
4622                 w = binder_dequeue_work_head_ilocked(list);
4623                 if (binder_worklist_empty_ilocked(&thread->todo))
4624                         thread->process_todo = false;
4625
4626                 switch (w->type) {
4627                 case BINDER_WORK_TRANSACTION: {
4628                         binder_inner_proc_unlock(proc);
4629                         t = container_of(w, struct binder_transaction, work);
4630                 } break;
4631                 case BINDER_WORK_RETURN_ERROR: {
4632                         struct binder_error *e = container_of(
4633                                         w, struct binder_error, work);
4634
4635                         WARN_ON(e->cmd == BR_OK);
4636                         binder_inner_proc_unlock(proc);
4637                         if (put_user(e->cmd, (uint32_t __user *)ptr))
4638                                 return -EFAULT;
4639                         cmd = e->cmd;
4640                         e->cmd = BR_OK;
4641                         ptr += sizeof(uint32_t);
4642
4643                         binder_stat_br(proc, thread, cmd);
4644                 } break;
4645                 case BINDER_WORK_TRANSACTION_COMPLETE: {
4646                         binder_inner_proc_unlock(proc);
4647                         cmd = BR_TRANSACTION_COMPLETE;
4648                         kfree(w);
4649                         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4650                         if (put_user(cmd, (uint32_t __user *)ptr))
4651                                 return -EFAULT;
4652                         ptr += sizeof(uint32_t);
4653
4654                         binder_stat_br(proc, thread, cmd);
4655                         binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4656                                      "%d:%d BR_TRANSACTION_COMPLETE\n",
4657                                      proc->pid, thread->pid);
4658                 } break;
4659                 case BINDER_WORK_NODE: {
4660                         struct binder_node *node = container_of(w, struct binder_node, work);
4661                         int strong, weak;
4662                         binder_uintptr_t node_ptr = node->ptr;
4663                         binder_uintptr_t node_cookie = node->cookie;
4664                         int node_debug_id = node->debug_id;
4665                         int has_weak_ref;
4666                         int has_strong_ref;
4667                         void __user *orig_ptr = ptr;
4668
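                        /*
                         * Work out whether the node currently needs strong
                         * and/or weak references from userspace, update the
                         * has_strong_ref/has_weak_ref bookkeeping, and emit
                         * BR_INCREFS/BR_ACQUIRE or BR_RELEASE/BR_DECREFS
                         * below for any transitions.
                         */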
4669                         BUG_ON(proc != node->proc);
4670                         strong = node->internal_strong_refs ||
4671                                         node->local_strong_refs;
4672                         weak = !hlist_empty(&node->refs) ||
4673                                         node->local_weak_refs ||
4674                                         node->tmp_refs || strong;
4675                         has_strong_ref = node->has_strong_ref;
4676                         has_weak_ref = node->has_weak_ref;
4677
4678                         if (weak && !has_weak_ref) {
4679                                 node->has_weak_ref = 1;
4680                                 node->pending_weak_ref = 1;
4681                                 node->local_weak_refs++;
4682                         }
4683                         if (strong && !has_strong_ref) {
4684                                 node->has_strong_ref = 1;
4685                                 node->pending_strong_ref = 1;
4686                                 node->local_strong_refs++;
4687                         }
4688                         if (!strong && has_strong_ref)
4689                                 node->has_strong_ref = 0;
4690                         if (!weak && has_weak_ref)
4691                                 node->has_weak_ref = 0;
4692                         if (!weak && !strong) {
4693                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4694                                              "%d:%d node %d u%016llx c%016llx deleted\n",
4695                                              proc->pid, thread->pid,
4696                                              node_debug_id,
4697                                              (u64)node_ptr,
4698                                              (u64)node_cookie);
4699                                 rb_erase(&node->rb_node, &proc->nodes);
4700                                 binder_inner_proc_unlock(proc);
4701                                 binder_node_lock(node);
4702                                 /*
4703                                  * Acquire the node lock before freeing the
4704                                  * node to serialize with other threads that
4705                                  * may have been holding the node lock while
4706                                  * decrementing this node (avoids race where
4707                                  * this thread frees while the other thread
4708                                  * is unlocking the node after the final
4709                                  * decrement)
4710                                  */
4711                                 binder_node_unlock(node);
4712                                 binder_free_node(node);
4713                         } else
4714                                 binder_inner_proc_unlock(proc);
4715
4716                         if (weak && !has_weak_ref)
4717                                 ret = binder_put_node_cmd(
4718                                                 proc, thread, &ptr, node_ptr,
4719                                                 node_cookie, node_debug_id,
4720                                                 BR_INCREFS, "BR_INCREFS");
4721                         if (!ret && strong && !has_strong_ref)
4722                                 ret = binder_put_node_cmd(
4723                                                 proc, thread, &ptr, node_ptr,
4724                                                 node_cookie, node_debug_id,
4725                                                 BR_ACQUIRE, "BR_ACQUIRE");
4726                         if (!ret && !strong && has_strong_ref)
4727                                 ret = binder_put_node_cmd(
4728                                                 proc, thread, &ptr, node_ptr,
4729                                                 node_cookie, node_debug_id,
4730                                                 BR_RELEASE, "BR_RELEASE");
4731                         if (!ret && !weak && has_weak_ref)
4732                                 ret = binder_put_node_cmd(
4733                                                 proc, thread, &ptr, node_ptr,
4734                                                 node_cookie, node_debug_id,
4735                                                 BR_DECREFS, "BR_DECREFS");
4736                         if (orig_ptr == ptr)
4737                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4738                                              "%d:%d node %d u%016llx c%016llx state unchanged\n",
4739                                              proc->pid, thread->pid,
4740                                              node_debug_id,
4741                                              (u64)node_ptr,
4742                                              (u64)node_cookie);
4743                         if (ret)
4744                                 return ret;
4745                 } break;
4746                 case BINDER_WORK_DEAD_BINDER:
4747                 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4748                 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4749                         struct binder_ref_death *death;
4750                         uint32_t cmd;
4751                         binder_uintptr_t cookie;
4752
4753                         death = container_of(w, struct binder_ref_death, work);
4754                         if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4755                                 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4756                         else
4757                                 cmd = BR_DEAD_BINDER;
4758                         cookie = death->cookie;
4759
4760                         binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4761                                      "%d:%d %s %016llx\n",
4762                                       proc->pid, thread->pid,
4763                                       cmd == BR_DEAD_BINDER ?
4764                                       "BR_DEAD_BINDER" :
4765                                       "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4766                                       (u64)cookie);
4767                         if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4768                                 binder_inner_proc_unlock(proc);
4769                                 kfree(death);
4770                                 binder_stats_deleted(BINDER_STAT_DEATH);
4771                         } else {
4772                                 binder_enqueue_work_ilocked(
4773                                                 w, &proc->delivered_death);
4774                                 binder_inner_proc_unlock(proc);
4775                         }
4776                         if (put_user(cmd, (uint32_t __user *)ptr))
4777                                 return -EFAULT;
4778                         ptr += sizeof(uint32_t);
4779                         if (put_user(cookie,
4780                                      (binder_uintptr_t __user *)ptr))
4781                                 return -EFAULT;
4782                         ptr += sizeof(binder_uintptr_t);
4783                         binder_stat_br(proc, thread, cmd);
4784                         if (cmd == BR_DEAD_BINDER)
4785                                 goto done; /* DEAD_BINDER notifications can cause transactions */
4786                 } break;
4787                 default:
4788                         binder_inner_proc_unlock(proc);
4789                         pr_err("%d:%d: bad work type %d\n",
4790                                proc->pid, thread->pid, w->type);
4791                         break;
4792                 }
4793
4794                 if (!t)
4795                         continue;
4796
4797                 BUG_ON(t->buffer == NULL);
4798                 if (t->buffer->target_node) {
4799                         struct binder_node *target_node = t->buffer->target_node;
4800
4801                         trd->target.ptr = target_node->ptr;
4802                         trd->cookie =  target_node->cookie;
4803                         t->saved_priority = task_nice(current);
4804                         if (t->priority < target_node->min_priority &&
4805                             !(t->flags & TF_ONE_WAY))
4806                                 binder_set_nice(t->priority);
4807                         else if (!(t->flags & TF_ONE_WAY) ||
4808                                  t->saved_priority > target_node->min_priority)
4809                                 binder_set_nice(target_node->min_priority);
4810                         cmd = BR_TRANSACTION;
4811                 } else {
4812                         trd->target.ptr = 0;
4813                         trd->cookie = 0;
4814                         cmd = BR_REPLY;
4815                 }
4816                 trd->code = t->code;
4817                 trd->flags = t->flags;
4818                 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4819
4820                 t_from = binder_get_txn_from(t);
4821                 if (t_from) {
4822                         struct task_struct *sender = t_from->proc->tsk;
4823
4824                         trd->sender_pid =
4825                                 task_tgid_nr_ns(sender,
4826                                                 task_active_pid_ns(current));
4827                 } else {
4828                         trd->sender_pid = 0;
4829                 }
4830
4831                 ret = binder_apply_fd_fixups(proc, t);
4832                 if (ret) {
4833                         struct binder_buffer *buffer = t->buffer;
4834                         bool oneway = !!(t->flags & TF_ONE_WAY);
4835                         int tid = t->debug_id;
4836
4837                         if (t_from)
4838                                 binder_thread_dec_tmpref(t_from);
4839                         buffer->transaction = NULL;
4840                         binder_cleanup_transaction(t, "fd fixups failed",
4841                                                    BR_FAILED_REPLY);
4842                         binder_free_buf(proc, thread, buffer, true);
4843                         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4844                                      "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4845                                      proc->pid, thread->pid,
4846                                      oneway ? "async " :
4847                                         (cmd == BR_REPLY ? "reply " : ""),
4848                                      tid, BR_FAILED_REPLY, ret, __LINE__);
4849                         if (cmd == BR_REPLY) {
4850                                 cmd = BR_FAILED_REPLY;
4851                                 if (put_user(cmd, (uint32_t __user *)ptr))
4852                                         return -EFAULT;
4853                                 ptr += sizeof(uint32_t);
4854                                 binder_stat_br(proc, thread, cmd);
4855                                 break;
4856                         }
4857                         continue;
4858                 }
4859                 trd->data_size = t->buffer->data_size;
4860                 trd->offsets_size = t->buffer->offsets_size;
4861                 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4862                 trd->data.ptr.offsets = trd->data.ptr.buffer +
4863                                         ALIGN(t->buffer->data_size,
4864                                             sizeof(void *));
4865
4866                 tr.secctx = t->security_ctx;
4867                 if (t->security_ctx) {
4868                         cmd = BR_TRANSACTION_SEC_CTX;
4869                         trsize = sizeof(tr);
4870                 }
4871                 if (put_user(cmd, (uint32_t __user *)ptr)) {
4872                         if (t_from)
4873                                 binder_thread_dec_tmpref(t_from);
4874
4875                         binder_cleanup_transaction(t, "put_user failed",
4876                                                    BR_FAILED_REPLY);
4877
4878                         return -EFAULT;
4879                 }
4880                 ptr += sizeof(uint32_t);
4881                 if (copy_to_user(ptr, &tr, trsize)) {
4882                         if (t_from)
4883                                 binder_thread_dec_tmpref(t_from);
4884
4885                         binder_cleanup_transaction(t, "copy_to_user failed",
4886                                                    BR_FAILED_REPLY);
4887
4888                         return -EFAULT;
4889                 }
4890                 ptr += trsize;
4891
4892                 trace_binder_transaction_received(t);
4893                 binder_stat_br(proc, thread, cmd);
4894                 binder_debug(BINDER_DEBUG_TRANSACTION,
4895                              "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4896                              proc->pid, thread->pid,
4897                              (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4898                                 (cmd == BR_TRANSACTION_SEC_CTX) ?
4899                                      "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4900                              t->debug_id, t_from ? t_from->proc->pid : 0,
4901                              t_from ? t_from->pid : 0, cmd,
4902                              t->buffer->data_size, t->buffer->offsets_size,
4903                              (u64)trd->data.ptr.buffer,
4904                              (u64)trd->data.ptr.offsets);
4905
4906                 if (t_from)
4907                         binder_thread_dec_tmpref(t_from);
4908                 t->buffer->allow_user_free = 1;
4909                 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4910                         binder_inner_proc_lock(thread->proc);
4911                         t->to_parent = thread->transaction_stack;
4912                         t->to_thread = thread;
4913                         thread->transaction_stack = t;
4914                         binder_inner_proc_unlock(thread->proc);
4915                 } else {
4916                         binder_free_transaction(t);
4917                 }
4918                 break;
4919         }
4920
4921 done:
4922
4923         *consumed = ptr - buffer;
4924         binder_inner_proc_lock(proc);
4925         if (proc->requested_threads == 0 &&
4926             list_empty(&thread->proc->waiting_threads) &&
4927             proc->requested_threads_started < proc->max_threads &&
4928             (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4929              BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
4930              /* spawn a new thread if we leave this out */) {
4931                 proc->requested_threads++;
4932                 binder_inner_proc_unlock(proc);
4933                 binder_debug(BINDER_DEBUG_THREADS,
4934                              "%d:%d BR_SPAWN_LOOPER\n",
4935                              proc->pid, thread->pid);
4936                 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4937                         return -EFAULT;
4938                 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4939         } else
4940                 binder_inner_proc_unlock(proc);
4941         return 0;
4942 }
4943
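/**
 * binder_release_work() - drain and discard a pending work list
 * @proc: binder_proc the list belongs to
 * @list: work list to drain (e.g. proc->todo or thread->todo)
 *
 * Dequeues every binder_work item under the inner lock and disposes of
 * it: undelivered transactions are failed with BR_DEAD_REPLY, completion
 * and death-notification records are freed, and anything unexpected is
 * logged. Used when a thread or process is torn down.
 */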
4944 static void binder_release_work(struct binder_proc *proc,
4945                                 struct list_head *list)
4946 {
4947         struct binder_work *w;
4948         enum binder_work_type wtype;
4949
4950         while (1) {
4951                 binder_inner_proc_lock(proc);
4952                 w = binder_dequeue_work_head_ilocked(list);
4953                 wtype = w ? w->type : 0;
4954                 binder_inner_proc_unlock(proc);
4955                 if (!w)
4956                         return;
4957
4958                 switch (wtype) {
4959                 case BINDER_WORK_TRANSACTION: {
4960                         struct binder_transaction *t;
4961
4962                         t = container_of(w, struct binder_transaction, work);
4963
4964                         binder_cleanup_transaction(t, "process died.",
4965                                                    BR_DEAD_REPLY);
4966                 } break;
4967                 case BINDER_WORK_RETURN_ERROR: {
4968                         struct binder_error *e = container_of(
4969                                         w, struct binder_error, work);
4970
4971                         binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4972                                 "undelivered TRANSACTION_ERROR: %u\n",
4973                                 e->cmd);
4974                 } break;
4975                 case BINDER_WORK_TRANSACTION_COMPLETE: {
4976                         binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4977                                 "undelivered TRANSACTION_COMPLETE\n");
4978                         kfree(w);
4979                         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4980                 } break;
4981                 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4982                 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4983                         struct binder_ref_death *death;
4984
4985                         death = container_of(w, struct binder_ref_death, work);
4986                         binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4987                                 "undelivered death notification, %016llx\n",
4988                                 (u64)death->cookie);
4989                         kfree(death);
4990                         binder_stats_deleted(BINDER_STAT_DEATH);
4991                 } break;
4992                 case BINDER_WORK_NODE:
4993                         break;
4994                 default:
4995                         pr_err("unexpected work type, %d, not freed\n",
4996                                wtype);
4997                         break;
4998                 }
4999         }
5000
5001 }
5002
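/**
 * binder_get_thread_ilocked() - look up (or insert) the calling thread
 * @proc: binder_proc to search
 * @new_thread: preallocated thread to insert if no entry exists, or
 *              NULL to perform a lookup only
 *
 * Walks proc->threads, an rbtree keyed by PID, looking for an entry for
 * current. If none is found and @new_thread was supplied, it is
 * initialized, linked into the tree and returned.
 *
 * Requires proc->inner_lock to be held.
 *
 * Return: the matching or newly inserted thread, or NULL if no entry
 * exists and @new_thread is NULL.
 */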
5003 static struct binder_thread *binder_get_thread_ilocked(
5004                 struct binder_proc *proc, struct binder_thread *new_thread)
5005 {
5006         struct binder_thread *thread = NULL;
5007         struct rb_node *parent = NULL;
5008         struct rb_node **p = &proc->threads.rb_node;
5009
5010         while (*p) {
5011                 parent = *p;
5012                 thread = rb_entry(parent, struct binder_thread, rb_node);
5013
5014                 if (current->pid < thread->pid)
5015                         p = &(*p)->rb_left;
5016                 else if (current->pid > thread->pid)
5017                         p = &(*p)->rb_right;
5018                 else
5019                         return thread;
5020         }
5021         if (!new_thread)
5022                 return NULL;
5023         thread = new_thread;
5024         binder_stats_created(BINDER_STAT_THREAD);
5025         thread->proc = proc;
5026         thread->pid = current->pid;
5027         atomic_set(&thread->tmp_ref, 0);
5028         init_waitqueue_head(&thread->wait);
5029         INIT_LIST_HEAD(&thread->todo);
5030         rb_link_node(&thread->rb_node, parent, p);
5031         rb_insert_color(&thread->rb_node, &proc->threads);
5032         thread->looper_need_return = true;
5033         thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
5034         thread->return_error.cmd = BR_OK;
5035         thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
5036         thread->reply_error.cmd = BR_OK;
5037         INIT_LIST_HEAD(&new_thread->waiting_thread_node);
5038         return thread;
5039 }
5040
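/**
 * binder_get_thread() - get the binder_thread for the current task
 * @proc: binder_proc the thread belongs to
 *
 * Tries a lookup under the inner lock first. If the calling task has no
 * thread yet, one is allocated outside the lock and the insert is
 * retried; if another task raced in an entry for the same PID, the
 * unused allocation is freed.
 *
 * Return: the thread, or NULL on allocation failure.
 */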
5041 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
5042 {
5043         struct binder_thread *thread;
5044         struct binder_thread *new_thread;
5045
5046         binder_inner_proc_lock(proc);
5047         thread = binder_get_thread_ilocked(proc, NULL);
5048         binder_inner_proc_unlock(proc);
5049         if (!thread) {
5050                 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
5051                 if (new_thread == NULL)
5052                         return NULL;
5053                 binder_inner_proc_lock(proc);
5054                 thread = binder_get_thread_ilocked(proc, new_thread);
5055                 binder_inner_proc_unlock(proc);
5056                 if (thread != new_thread)
5057                         kfree(new_thread);
5058         }
5059         return thread;
5060 }
5061
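/**
 * binder_free_proc() - free a binder_proc when its last user is gone
 * @proc: process to free
 *
 * Drops the reference on the owning binder_device (freeing the device
 * and its context name if this was the last user), releases the
 * allocator state, and puts the task struct and credentials taken at
 * open time.
 */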
5062 static void binder_free_proc(struct binder_proc *proc)
5063 {
5064         struct binder_device *device;
5065
5066         BUG_ON(!list_empty(&proc->todo));
5067         BUG_ON(!list_empty(&proc->delivered_death));
5068         device = container_of(proc->context, struct binder_device, context);
5069         if (refcount_dec_and_test(&device->ref)) {
5070                 kfree(proc->context->name);
5071                 kfree(device);
5072         }
5073         binder_alloc_deferred_release(&proc->alloc);
5074         put_task_struct(proc->tsk);
5075         put_cred(proc->cred);
5076         binder_stats_deleted(BINDER_STAT_PROC);
5077         kfree(proc);
5078 }
5079
5080 static void binder_free_thread(struct binder_thread *thread)
5081 {
5082         BUG_ON(!list_empty(&thread->todo));
5083         binder_stats_deleted(BINDER_STAT_THREAD);
5084         binder_proc_dec_tmpref(thread->proc);
5085         kfree(thread);
5086 }
5087
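/**
 * binder_thread_release() - tear down a binder thread
 * @proc: owning process
 * @thread: thread to release
 *
 * Removes the thread from proc->threads, marks it dead and walks its
 * transaction stack, severing each transaction's link to this thread.
 * If the top of the stack is a transaction awaiting this thread's
 * reply, its sender is answered with BR_DEAD_REPLY. Poll users are
 * woken and quiesced before the waitqueue disappears.
 *
 * Return: the number of transactions that were still active.
 */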
5088 static int binder_thread_release(struct binder_proc *proc,
5089                                  struct binder_thread *thread)
5090 {
5091         struct binder_transaction *t;
5092         struct binder_transaction *send_reply = NULL;
5093         int active_transactions = 0;
5094         struct binder_transaction *last_t = NULL;
5095
5096         binder_inner_proc_lock(thread->proc);
5097         /*
5098          * take a ref on the proc so it survives
5099          * after we remove this thread from proc->threads.
5100          * The corresponding dec is when we actually
5101          * free the thread in binder_free_thread()
5102          */
5103         proc->tmp_ref++;
5104         /*
5105          * take a ref on this thread to ensure it
5106          * survives while we are releasing it
5107          */
5108         atomic_inc(&thread->tmp_ref);
5109         rb_erase(&thread->rb_node, &proc->threads);
5110         t = thread->transaction_stack;
5111         if (t) {
5112                 spin_lock(&t->lock);
5113                 if (t->to_thread == thread)
5114                         send_reply = t;
5115         } else {
5116                 __acquire(&t->lock);
5117         }
5118         thread->is_dead = true;
5119
5120         while (t) {
5121                 last_t = t;
5122                 active_transactions++;
5123                 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5124                              "release %d:%d transaction %d %s, still active\n",
5125                               proc->pid, thread->pid,
5126                              t->debug_id,
5127                              (t->to_thread == thread) ? "in" : "out");
5128
5129                 if (t->to_thread == thread) {
5130                         t->to_proc = NULL;
5131                         t->to_thread = NULL;
5132                         if (t->buffer) {
5133                                 t->buffer->transaction = NULL;
5134                                 t->buffer = NULL;
5135                         }
5136                         t = t->to_parent;
5137                 } else if (t->from == thread) {
5138                         t->from = NULL;
5139                         t = t->from_parent;
5140                 } else
5141                         BUG();
5142                 spin_unlock(&last_t->lock);
5143                 if (t)
5144                         spin_lock(&t->lock);
5145                 else
5146                         __acquire(&t->lock);
5147         }
5148         /* annotation for sparse, lock not acquired in last iteration above */
5149         __release(&t->lock);
5150
5151         /*
5152          * If this thread used poll, make sure we remove the waitqueue from any
5153          * poll data structures holding it.
5154          */
5155         if (thread->looper & BINDER_LOOPER_STATE_POLL)
5156                 wake_up_pollfree(&thread->wait);
5157
5158         binder_inner_proc_unlock(thread->proc);
5159
5160         /*
5161          * This is needed to avoid races between wake_up_pollfree() above and
5162          * someone else removing the last entry from the queue for other reasons
5163          * (e.g. ep_remove_wait_queue() being called due to an epoll file
5164          * descriptor being closed).  Such other users hold an RCU read lock, so
5165          * we can be sure they're done after we call synchronize_rcu().
5166          */
5167         if (thread->looper & BINDER_LOOPER_STATE_POLL)
5168                 synchronize_rcu();
5169
5170         if (send_reply)
5171                 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5172         binder_release_work(proc, &thread->todo);
5173         binder_thread_dec_tmpref(thread);
5174         return active_transactions;
5175 }
5176
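/**
 * binder_poll() - poll/epoll support for a binder fd
 * @filp: file for the binder_proc
 * @wait: poll table that thread->wait is registered with
 *
 * Marks the calling thread as a poll user and reports EPOLLIN when
 * work is queued for the thread (or for the process, when the thread
 * is available for proc work).
 */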
5177 static __poll_t binder_poll(struct file *filp,
5178                                 struct poll_table_struct *wait)
5179 {
5180         struct binder_proc *proc = filp->private_data;
5181         struct binder_thread *thread = NULL;
5182         bool wait_for_proc_work;
5183
5184         thread = binder_get_thread(proc);
5185         if (!thread)
5186                 return EPOLLERR;
5187
5188         binder_inner_proc_lock(thread->proc);
5189         thread->looper |= BINDER_LOOPER_STATE_POLL;
5190         wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5191
5192         binder_inner_proc_unlock(thread->proc);
5193
5194         poll_wait(filp, &thread->wait, wait);
5195
5196         if (binder_has_work(thread, wait_for_proc_work))
5197                 return EPOLLIN;
5198
5199         return 0;
5200 }
5201
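/**
 * binder_ioctl_write_read() - handle the BINDER_WRITE_READ ioctl
 * @filp: binder file
 * @cmd: ioctl command, used only to validate the argument size
 * @arg: userspace pointer to a struct binder_write_read
 * @thread: calling binder thread
 *
 * Copies the binder_write_read descriptor in, consumes commands from
 * the write buffer via binder_thread_write(), then fills the read
 * buffer via binder_thread_read() (blocking unless the fd is
 * O_NONBLOCK). The consumed counts are copied back even on error so
 * userspace can tell how far processing got.
 *
 * Illustrative userspace sketch (not part of this driver): a client
 * typically opens a binder device, maps a read-only region for
 * incoming transaction payloads and then drives the protocol through
 * this ioctl; write_buf, read_buf and write_len are placeholder names
 * for the example only:
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE, fd, 0);
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)write_buf,
 *		.write_size   = write_len,
 *		.read_buffer  = (binder_uintptr_t)read_buf,
 *		.read_size    = sizeof(read_buf),
 *	};
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 *
 * On return, bwr.write_consumed and bwr.read_consumed report how much
 * of each buffer the kernel processed.
 */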
5202 static int binder_ioctl_write_read(struct file *filp,
5203                                 unsigned int cmd, unsigned long arg,
5204                                 struct binder_thread *thread)
5205 {
5206         int ret = 0;
5207         struct binder_proc *proc = filp->private_data;
5208         unsigned int size = _IOC_SIZE(cmd);
5209         void __user *ubuf = (void __user *)arg;
5210         struct binder_write_read bwr;
5211
5212         if (size != sizeof(struct binder_write_read)) {
5213                 ret = -EINVAL;
5214                 goto out;
5215         }
5216         if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
5217                 ret = -EFAULT;
5218                 goto out;
5219         }
5220         binder_debug(BINDER_DEBUG_READ_WRITE,
5221                      "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5222                      proc->pid, thread->pid,
5223                      (u64)bwr.write_size, (u64)bwr.write_buffer,
5224                      (u64)bwr.read_size, (u64)bwr.read_buffer);
5225
5226         if (bwr.write_size > 0) {
5227                 ret = binder_thread_write(proc, thread,
5228                                           bwr.write_buffer,
5229                                           bwr.write_size,
5230                                           &bwr.write_consumed);
5231                 trace_binder_write_done(ret);
5232                 if (ret < 0) {
5233                         bwr.read_consumed = 0;
5234                         if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5235                                 ret = -EFAULT;
5236                         goto out;
5237                 }
5238         }
5239         if (bwr.read_size > 0) {
5240                 ret = binder_thread_read(proc, thread, bwr.read_buffer,
5241                                          bwr.read_size,
5242                                          &bwr.read_consumed,
5243                                          filp->f_flags & O_NONBLOCK);
5244                 trace_binder_read_done(ret);
5245                 binder_inner_proc_lock(proc);
5246                 if (!binder_worklist_empty_ilocked(&proc->todo))
5247                         binder_wakeup_proc_ilocked(proc);
5248                 binder_inner_proc_unlock(proc);
5249                 if (ret < 0) {
5250                         if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5251                                 ret = -EFAULT;
5252                         goto out;
5253                 }
5254         }
5255         binder_debug(BINDER_DEBUG_READ_WRITE,
5256                      "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5257                      proc->pid, thread->pid,
5258                      (u64)bwr.write_consumed, (u64)bwr.write_size,
5259                      (u64)bwr.read_consumed, (u64)bwr.read_size);
5260         if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
5261                 ret = -EFAULT;
5262                 goto out;
5263         }
5264 out:
5265         return ret;
5266 }
5267
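/**
 * binder_ioctl_set_ctx_mgr() - handle BINDER_SET_CONTEXT_MGR[_EXT]
 * @filp: binder file of the would-be context manager
 * @fbo: flat_binder_object describing the manager node, or NULL for
 *       the legacy BINDER_SET_CONTEXT_MGR call
 *
 * Registers the calling process as the context manager for its binder
 * context. Only one manager may exist per context, the caller must
 * pass the security hook, and if a manager uid was already recorded
 * the caller's euid must match it. On success a new node holding
 * strong and weak references is installed as
 * context->binder_context_mgr_node.
 */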
5268 static int binder_ioctl_set_ctx_mgr(struct file *filp,
5269                                     struct flat_binder_object *fbo)
5270 {
5271         int ret = 0;
5272         struct binder_proc *proc = filp->private_data;
5273         struct binder_context *context = proc->context;
5274         struct binder_node *new_node;
5275         kuid_t curr_euid = current_euid();
5276
5277         mutex_lock(&context->context_mgr_node_lock);
5278         if (context->binder_context_mgr_node) {
5279                 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5280                 ret = -EBUSY;
5281                 goto out;
5282         }
5283         ret = security_binder_set_context_mgr(proc->cred);
5284         if (ret < 0)
5285                 goto out;
5286         if (uid_valid(context->binder_context_mgr_uid)) {
5287                 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5288                         pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5289                                from_kuid(&init_user_ns, curr_euid),
5290                                from_kuid(&init_user_ns,
5291                                          context->binder_context_mgr_uid));
5292                         ret = -EPERM;
5293                         goto out;
5294                 }
5295         } else {
5296                 context->binder_context_mgr_uid = curr_euid;
5297         }
5298         new_node = binder_new_node(proc, fbo);
5299         if (!new_node) {
5300                 ret = -ENOMEM;
5301                 goto out;
5302         }
5303         binder_node_lock(new_node);
5304         new_node->local_weak_refs++;
5305         new_node->local_strong_refs++;
5306         new_node->has_strong_ref = 1;
5307         new_node->has_weak_ref = 1;
5308         context->binder_context_mgr_node = new_node;
5309         binder_node_unlock(new_node);
5310         binder_put_node(new_node);
5311 out:
5312         mutex_unlock(&context->context_mgr_node_lock);
5313         return ret;
5314 }
5315
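/**
 * binder_ioctl_get_node_info_for_ref() - handle BINDER_GET_NODE_INFO_FOR_REF
 * @proc: calling process, which must be the context manager
 * @info: in/out info block; only @info->handle may be non-zero on entry
 *
 * Looks up the node behind the given handle and reports its strong and
 * weak reference counts back to the caller.
 */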
5316 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5317                 struct binder_node_info_for_ref *info)
5318 {
5319         struct binder_node *node;
5320         struct binder_context *context = proc->context;
5321         __u32 handle = info->handle;
5322
5323         if (info->strong_count || info->weak_count || info->reserved1 ||
5324             info->reserved2 || info->reserved3) {
5325                 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5326                                   proc->pid);
5327                 return -EINVAL;
5328         }
5329
5330         /* This ioctl may only be used by the context manager */
5331         mutex_lock(&context->context_mgr_node_lock);
5332         if (!context->binder_context_mgr_node ||
5333                 context->binder_context_mgr_node->proc != proc) {
5334                 mutex_unlock(&context->context_mgr_node_lock);
5335                 return -EPERM;
5336         }
5337         mutex_unlock(&context->context_mgr_node_lock);
5338
5339         node = binder_get_node_from_ref(proc, handle, true, NULL);
5340         if (!node)
5341                 return -EINVAL;
5342
5343         info->strong_count = node->local_strong_refs +
5344                 node->internal_strong_refs;
5345         info->weak_count = node->local_weak_refs;
5346
5347         binder_put_node(node);
5348
5349         return 0;
5350 }
5351
5352 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5353                                 struct binder_node_debug_info *info)
5354 {
5355         struct rb_node *n;
5356         binder_uintptr_t ptr = info->ptr;
5357
5358         memset(info, 0, sizeof(*info));
5359
5360         binder_inner_proc_lock(proc);
5361         for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5362                 struct binder_node *node = rb_entry(n, struct binder_node,
5363                                                     rb_node);
5364                 if (node->ptr > ptr) {
5365                         info->ptr = node->ptr;
5366                         info->cookie = node->cookie;
5367                         info->has_strong_ref = node->has_strong_ref;
5368                         info->has_weak_ref = node->has_weak_ref;
5369                         break;
5370                 }
5371         }
5372         binder_inner_proc_unlock(proc);
5373
5374         return 0;
5375 }
5376
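/**
 * binder_ioctl() - dispatch binder ioctls
 * @filp: binder file
 * @cmd: ioctl command
 * @arg: command argument, usually a userspace pointer
 *
 * Looks up (or creates) the binder_thread for the caller and hands the
 * command off to the helpers above: BINDER_WRITE_READ for the main
 * command/return stream, plus the setup and introspection ioctls
 * (max threads, context manager registration, thread exit, version and
 * node info queries).
 *
 * Illustrative userspace sketch (not part of this driver) of the setup
 * ioctls, assuming fd is an open binder descriptor:
 *
 *	struct binder_version vers;
 *	uint32_t max_threads = 15;
 *
 *	ioctl(fd, BINDER_VERSION, &vers);
 *	if (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *		... bail out: kernel/userspace mismatch ...
 *	ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads);
 */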
5377 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5378 {
5379         int ret;
5380         struct binder_proc *proc = filp->private_data;
5381         struct binder_thread *thread;
5382         unsigned int size = _IOC_SIZE(cmd);
5383         void __user *ubuf = (void __user *)arg;
5384
5385         /*pr_info("binder_ioctl: %d:%d %x %lx\n",
5386                         proc->pid, current->pid, cmd, arg);*/
5387
5388         binder_selftest_alloc(&proc->alloc);
5389
5390         trace_binder_ioctl(cmd, arg);
5391
5392         ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5393         if (ret)
5394                 goto err_unlocked;
5395
5396         thread = binder_get_thread(proc);
5397         if (thread == NULL) {
5398                 ret = -ENOMEM;
5399                 goto err;
5400         }
5401
5402         switch (cmd) {
5403         case BINDER_WRITE_READ:
5404                 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
5405                 if (ret)
5406                         goto err;
5407                 break;
5408         case BINDER_SET_MAX_THREADS: {
5409                 int max_threads;
5410
5411                 if (copy_from_user(&max_threads, ubuf,
5412                                    sizeof(max_threads))) {
5413                         ret = -EINVAL;
5414                         goto err;
5415                 }
5416                 binder_inner_proc_lock(proc);
5417                 proc->max_threads = max_threads;
5418                 binder_inner_proc_unlock(proc);
5419                 break;
5420         }
5421         case BINDER_SET_CONTEXT_MGR_EXT: {
5422                 struct flat_binder_object fbo;
5423
5424                 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5425                         ret = -EINVAL;
5426                         goto err;
5427                 }
5428                 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5429                 if (ret)
5430                         goto err;
5431                 break;
5432         }
5433         case BINDER_SET_CONTEXT_MGR:
5434                 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5435                 if (ret)
5436                         goto err;
5437                 break;
5438         case BINDER_THREAD_EXIT:
5439                 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5440                              proc->pid, thread->pid);
5441                 binder_thread_release(proc, thread);
5442                 thread = NULL;
5443                 break;
5444         case BINDER_VERSION: {
5445                 struct binder_version __user *ver = ubuf;
5446
5447                 if (size != sizeof(struct binder_version)) {
5448                         ret = -EINVAL;
5449                         goto err;
5450                 }
5451                 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5452                              &ver->protocol_version)) {
5453                         ret = -EINVAL;
5454                         goto err;
5455                 }
5456                 break;
5457         }
5458         case BINDER_GET_NODE_INFO_FOR_REF: {
5459                 struct binder_node_info_for_ref info;
5460
5461                 if (copy_from_user(&info, ubuf, sizeof(info))) {
5462                         ret = -EFAULT;
5463                         goto err;
5464                 }
5465
5466                 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5467                 if (ret < 0)
5468                         goto err;
5469
5470                 if (copy_to_user(ubuf, &info, sizeof(info))) {
5471                         ret = -EFAULT;
5472                         goto err;
5473                 }
5474
5475                 break;
5476         }
5477         case BINDER_GET_NODE_DEBUG_INFO: {
5478                 struct binder_node_debug_info info;
5479
5480                 if (copy_from_user(&info, ubuf, sizeof(info))) {
5481                         ret = -EFAULT;
5482                         goto err;
5483                 }
5484
5485                 ret = binder_ioctl_get_node_debug_info(proc, &info);
5486                 if (ret < 0)
5487                         goto err;
5488
5489                 if (copy_to_user(ubuf, &info, sizeof(info))) {
5490                         ret = -EFAULT;
5491                         goto err;
5492                 }
5493                 break;
5494         }
5495         default:
5496                 ret = -EINVAL;
5497                 goto err;
5498         }
5499         ret = 0;
5500 err:
5501         if (thread)
5502                 thread->looper_need_return = false;
5503         wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5504         if (ret && ret != -ERESTARTSYS)
5505                 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5506 err_unlocked:
5507         trace_binder_ioctl_done(ret);
5508         return ret;
5509 }
5510
5511 static void binder_vma_open(struct vm_area_struct *vma)
5512 {
5513         struct binder_proc *proc = vma->vm_private_data;
5514
5515         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5516                      "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5517                      proc->pid, vma->vm_start, vma->vm_end,
5518                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5519                      (unsigned long)pgprot_val(vma->vm_page_prot));
5520 }
5521
5522 static void binder_vma_close(struct vm_area_struct *vma)
5523 {
5524         struct binder_proc *proc = vma->vm_private_data;
5525
5526         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5527                      "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5528                      proc->pid, vma->vm_start, vma->vm_end,
5529                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5530                      (unsigned long)pgprot_val(vma->vm_page_prot));
5531         binder_alloc_vma_close(&proc->alloc);
5532 }
5533
5534 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5535 {
5536         return VM_FAULT_SIGBUS;
5537 }
5538
5539 static const struct vm_operations_struct binder_vm_ops = {
5540         .open = binder_vma_open,
5541         .close = binder_vma_close,
5542         .fault = binder_vm_fault,
5543 };
5544
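/**
 * binder_mmap() - map the binder buffer area into userspace
 * @filp: binder file
 * @vma: vma describing the requested mapping
 *
 * Only the process that opened the device may map it, and the mapping
 * must never be writable (see FORBIDDEN_MMAP_FLAGS): the kernel copies
 * transaction payloads into this region and userspace only reads them.
 * Page management is delegated to binder_alloc_mmap_handler().
 */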
5545 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5546 {
5547         struct binder_proc *proc = filp->private_data;
5548
5549         if (proc->tsk != current->group_leader)
5550                 return -EINVAL;
5551
5552         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5553                      "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5554                      __func__, proc->pid, vma->vm_start, vma->vm_end,
5555                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5556                      (unsigned long)pgprot_val(vma->vm_page_prot));
5557
5558         if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5559                 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5560                        proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5561                 return -EPERM;
5562         }
5563         vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5564         vma->vm_flags &= ~VM_MAYWRITE;
5565
5566         vma->vm_ops = &binder_vm_ops;
5567         vma->vm_private_data = proc;
5568
5569         return binder_alloc_mmap_handler(&proc->alloc, vma);
5570 }
5571
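/**
 * binder_open() - set up per-process state for a binder fd
 * @nodp: inode of the binder device (binderfs or misc device)
 * @filp: file being opened
 *
 * Allocates and initializes the binder_proc for the opening process,
 * takes references on the task, credentials and binder device, adds
 * the proc to the global list, and creates the per-PID debugfs and
 * binderfs log entries on the first open by this PID.
 */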
5572 static int binder_open(struct inode *nodp, struct file *filp)
5573 {
5574         struct binder_proc *proc, *itr;
5575         struct binder_device *binder_dev;
5576         struct binderfs_info *info;
5577         struct dentry *binder_binderfs_dir_entry_proc = NULL;
5578         bool existing_pid = false;
5579
5580         binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5581                      current->group_leader->pid, current->pid);
5582
5583         proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5584         if (proc == NULL)
5585                 return -ENOMEM;
5586         spin_lock_init(&proc->inner_lock);
5587         spin_lock_init(&proc->outer_lock);
5588         get_task_struct(current->group_leader);
5589         proc->tsk = current->group_leader;
5590         proc->cred = get_cred(filp->f_cred);
5591         INIT_LIST_HEAD(&proc->todo);
5592         proc->default_priority = task_nice(current);
5593         /* binderfs stashes devices in i_private */
5594         if (is_binderfs_device(nodp)) {
5595                 binder_dev = nodp->i_private;
5596                 info = nodp->i_sb->s_fs_info;
5597                 binder_binderfs_dir_entry_proc = info->proc_log_dir;
5598         } else {
5599                 binder_dev = container_of(filp->private_data,
5600                                           struct binder_device, miscdev);
5601         }
5602         refcount_inc(&binder_dev->ref);
5603         proc->context = &binder_dev->context;
5604         binder_alloc_init(&proc->alloc);
5605
5606         binder_stats_created(BINDER_STAT_PROC);
5607         proc->pid = current->group_leader->pid;
5608         INIT_LIST_HEAD(&proc->delivered_death);
5609         INIT_LIST_HEAD(&proc->waiting_threads);
5610         filp->private_data = proc;
5611
5612         mutex_lock(&binder_procs_lock);
5613         hlist_for_each_entry(itr, &binder_procs, proc_node) {
5614                 if (itr->pid == proc->pid) {
5615                         existing_pid = true;
5616                         break;
5617                 }
5618         }
5619         hlist_add_head(&proc->proc_node, &binder_procs);
5620         mutex_unlock(&binder_procs_lock);
5621
5622         if (binder_debugfs_dir_entry_proc && !existing_pid) {
5623                 char strbuf[11];
5624
5625                 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5626                 /*
5627                  * proc debug entries are shared between contexts.
5628                  * Only create for the first PID to avoid debugfs log spamming.
5629                  * The printing code will anyway print all contexts for a given
5630                  * PID so this is not a problem.
5631                  */
5632                 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5633                         binder_debugfs_dir_entry_proc,
5634                         (void *)(unsigned long)proc->pid,
5635                         &proc_fops);
5636         }
5637
5638         if (binder_binderfs_dir_entry_proc && !existing_pid) {
5639                 char strbuf[11];
5640                 struct dentry *binderfs_entry;
5641
5642                 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5643                 /*
5644                  * Similar to debugfs, the process specific log file is shared
5645                  * between contexts. Only create for the first PID.
5646                  * This is ok since, as with debugfs, the log file will contain
5647                  * information on all contexts of a given PID.
5648                  */
5649                 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5650                         strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5651                 if (!IS_ERR(binderfs_entry)) {
5652                         proc->binderfs_entry = binderfs_entry;
5653                 } else {
5654                         int error;
5655
5656                         error = PTR_ERR(binderfs_entry);
5657                         pr_warn("Unable to create file %s in binderfs (error %d)\n",
5658                                 strbuf, error);
5659                 }
5660         }
5661
5662         return 0;
5663 }
5664
5665 static int binder_flush(struct file *filp, fl_owner_t id)
5666 {
5667         struct binder_proc *proc = filp->private_data;
5668
5669         binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5670
5671         return 0;
5672 }
5673
5674 static void binder_deferred_flush(struct binder_proc *proc)
5675 {
5676         struct rb_node *n;
5677         int wake_count = 0;
5678
5679         binder_inner_proc_lock(proc);
5680         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5681                 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5682
5683                 thread->looper_need_return = true;
5684                 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5685                         wake_up_interruptible(&thread->wait);
5686                         wake_count++;
5687                 }
5688         }
5689         binder_inner_proc_unlock(proc);
5690
5691         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5692                      "binder_flush: %d woke %d threads\n", proc->pid,
5693                      wake_count);
5694 }
5695
5696 static int binder_release(struct inode *nodp, struct file *filp)
5697 {
5698         struct binder_proc *proc = filp->private_data;
5699
5700         debugfs_remove(proc->debugfs_entry);
5701
5702         if (proc->binderfs_entry) {
5703                 binderfs_remove_file(proc->binderfs_entry);
5704                 proc->binderfs_entry = NULL;
5705         }
5706
5707         binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5708
5709         return 0;
5710 }
5711
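/**
 * binder_node_release() - release a node owned by a dying process
 * @node: node being released; the caller must hold a temporary ref
 * @refs: running count of incoming references, updated and returned
 *
 * If nothing else references the node it is freed immediately.
 * Otherwise it is moved to the global dead-nodes list and every
 * process that registered a death notification on one of its refs
 * gets BINDER_WORK_DEAD_BINDER queued on its todo list.
 *
 * Return: the updated incoming reference count.
 */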
5712 static int binder_node_release(struct binder_node *node, int refs)
5713 {
5714         struct binder_ref *ref;
5715         int death = 0;
5716         struct binder_proc *proc = node->proc;
5717
5718         binder_release_work(proc, &node->async_todo);
5719
5720         binder_node_lock(node);
5721         binder_inner_proc_lock(proc);
5722         binder_dequeue_work_ilocked(&node->work);
5723         /*
5724          * The caller must have taken a temporary ref on the node.
5725          */
5726         BUG_ON(!node->tmp_refs);
5727         if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5728                 binder_inner_proc_unlock(proc);
5729                 binder_node_unlock(node);
5730                 binder_free_node(node);
5731
5732                 return refs;
5733         }
5734
5735         node->proc = NULL;
5736         node->local_strong_refs = 0;
5737         node->local_weak_refs = 0;
5738         binder_inner_proc_unlock(proc);
5739
5740         spin_lock(&binder_dead_nodes_lock);
5741         hlist_add_head(&node->dead_node, &binder_dead_nodes);
5742         spin_unlock(&binder_dead_nodes_lock);
5743
5744         hlist_for_each_entry(ref, &node->refs, node_entry) {
5745                 refs++;
5746                 /*
5747                  * Need the node lock to synchronize
5748                  * with new notification requests and the
5749                  * inner lock to synchronize with queued
5750                  * death notifications.
5751                  */
5752                 binder_inner_proc_lock(ref->proc);
5753                 if (!ref->death) {
5754                         binder_inner_proc_unlock(ref->proc);
5755                         continue;
5756                 }
5757
5758                 death++;
5759
5760                 BUG_ON(!list_empty(&ref->death->work.entry));
5761                 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5762                 binder_enqueue_work_ilocked(&ref->death->work,
5763                                             &ref->proc->todo);
5764                 binder_wakeup_proc_ilocked(ref->proc);
5765                 binder_inner_proc_unlock(ref->proc);
5766         }
5767
5768         binder_debug(BINDER_DEBUG_DEAD_BINDER,
5769                      "node %d now dead, refs %d, death %d\n",
5770                      node->debug_id, refs, death);
5771         binder_node_unlock(node);
5772         binder_put_node(node);
5773
5774         return refs;
5775 }
5776
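/**
 * binder_deferred_release() - release all state of a closed binder fd
 * @proc: process being torn down
 *
 * Runs from the deferred workqueue after the file has been released.
 * Unregisters the proc (and the context manager node if it owned it),
 * releases every thread, node and reference, drains the pending work
 * lists, and finally drops the temporary proc reference, freeing the
 * proc once all outstanding users are gone.
 */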
5777 static void binder_deferred_release(struct binder_proc *proc)
5778 {
5779         struct binder_context *context = proc->context;
5780         struct rb_node *n;
5781         int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5782
5783         mutex_lock(&binder_procs_lock);
5784         hlist_del(&proc->proc_node);
5785         mutex_unlock(&binder_procs_lock);
5786
5787         mutex_lock(&context->context_mgr_node_lock);
5788         if (context->binder_context_mgr_node &&
5789             context->binder_context_mgr_node->proc == proc) {
5790                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5791                              "%s: %d context_mgr_node gone\n",
5792                              __func__, proc->pid);
5793                 context->binder_context_mgr_node = NULL;
5794         }
5795         mutex_unlock(&context->context_mgr_node_lock);
5796         binder_inner_proc_lock(proc);
5797         /*
5798          * Make sure proc stays alive after we
5799          * remove all the threads
5800          */
5801         proc->tmp_ref++;
5802
5803         proc->is_dead = true;
5804         threads = 0;
5805         active_transactions = 0;
5806         while ((n = rb_first(&proc->threads))) {
5807                 struct binder_thread *thread;
5808
5809                 thread = rb_entry(n, struct binder_thread, rb_node);
5810                 binder_inner_proc_unlock(proc);
5811                 threads++;
5812                 active_transactions += binder_thread_release(proc, thread);
5813                 binder_inner_proc_lock(proc);
5814         }
5815
5816         nodes = 0;
5817         incoming_refs = 0;
5818         while ((n = rb_first(&proc->nodes))) {
5819                 struct binder_node *node;
5820
5821                 node = rb_entry(n, struct binder_node, rb_node);
5822                 nodes++;
5823                 /*
5824                  * take a temporary ref on the node before
5825                  * calling binder_node_release() which will either
5826                  * kfree() the node or call binder_put_node()
5827                  */
5828                 binder_inc_node_tmpref_ilocked(node);
5829                 rb_erase(&node->rb_node, &proc->nodes);
5830                 binder_inner_proc_unlock(proc);
5831                 incoming_refs = binder_node_release(node, incoming_refs);
5832                 binder_inner_proc_lock(proc);
5833         }
5834         binder_inner_proc_unlock(proc);
5835
5836         outgoing_refs = 0;
5837         binder_proc_lock(proc);
5838         while ((n = rb_first(&proc->refs_by_desc))) {
5839                 struct binder_ref *ref;
5840
5841                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5842                 outgoing_refs++;
5843                 binder_cleanup_ref_olocked(ref);
5844                 binder_proc_unlock(proc);
5845                 binder_free_ref(ref);
5846                 binder_proc_lock(proc);
5847         }
5848         binder_proc_unlock(proc);
5849
5850         binder_release_work(proc, &proc->todo);
5851         binder_release_work(proc, &proc->delivered_death);
5852
5853         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5854                      "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5855                      __func__, proc->pid, threads, nodes, incoming_refs,
5856                      outgoing_refs, active_transactions);
5857
5858         binder_proc_dec_tmpref(proc);
5859 }
5860
5861 static void binder_deferred_func(struct work_struct *work)
5862 {
5863         struct binder_proc *proc;
5864
5865         int defer;
5866
5867         do {
5868                 mutex_lock(&binder_deferred_lock);
5869                 if (!hlist_empty(&binder_deferred_list)) {
5870                         proc = hlist_entry(binder_deferred_list.first,
5871                                         struct binder_proc, deferred_work_node);
5872                         hlist_del_init(&proc->deferred_work_node);
5873                         defer = proc->deferred_work;
5874                         proc->deferred_work = 0;
5875                 } else {
5876                         proc = NULL;
5877                         defer = 0;
5878                 }
5879                 mutex_unlock(&binder_deferred_lock);
5880
5881                 if (defer & BINDER_DEFERRED_FLUSH)
5882                         binder_deferred_flush(proc);
5883
5884                 if (defer & BINDER_DEFERRED_RELEASE)
5885                         binder_deferred_release(proc); /* frees proc */
5886         } while (proc);
5887 }
5888 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5889
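/**
 * binder_defer_work() - schedule deferred flush/release work for a proc
 * @proc: process the work applies to
 * @defer: BINDER_DEFERRED_* flags to add
 *
 * Accumulates the requested flags and queues the proc on the global
 * deferred list, kicking binder_deferred_work if it was not already
 * pending.
 */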
5890 static void
5891 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5892 {
5893         mutex_lock(&binder_deferred_lock);
5894         proc->deferred_work |= defer;
5895         if (hlist_unhashed(&proc->deferred_work_node)) {
5896                 hlist_add_head(&proc->deferred_work_node,
5897                                 &binder_deferred_list);
5898                 schedule_work(&binder_deferred_work);
5899         }
5900         mutex_unlock(&binder_deferred_lock);
5901 }
5902
5903 static void print_binder_transaction_ilocked(struct seq_file *m,
5904                                              struct binder_proc *proc,
5905                                              const char *prefix,
5906                                              struct binder_transaction *t)
5907 {
5908         struct binder_proc *to_proc;
5909         struct binder_buffer *buffer = t->buffer;
5910
5911         spin_lock(&t->lock);
5912         to_proc = t->to_proc;
5913         seq_printf(m,
5914                    "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5915                    prefix, t->debug_id, t,
5916                    t->from ? t->from->proc->pid : 0,
5917                    t->from ? t->from->pid : 0,
5918                    to_proc ? to_proc->pid : 0,
5919                    t->to_thread ? t->to_thread->pid : 0,
5920                    t->code, t->flags, t->priority, t->need_reply);
5921         spin_unlock(&t->lock);
5922
5923         if (proc != to_proc) {
5924                 /*
5925                  * Can only safely deref buffer if we are holding the
5926                  * correct proc inner lock for this node
5927                  */
5928                 seq_puts(m, "\n");
5929                 return;
5930         }
5931
5932         if (buffer == NULL) {
5933                 seq_puts(m, " buffer free\n");
5934                 return;
5935         }
5936         if (buffer->target_node)
5937                 seq_printf(m, " node %d", buffer->target_node->debug_id);
5938         seq_printf(m, " size %zd:%zd data %pK\n",
5939                    buffer->data_size, buffer->offsets_size,
5940                    buffer->user_data);
5941 }
5942
5943 static void print_binder_work_ilocked(struct seq_file *m,
5944                                      struct binder_proc *proc,
5945                                      const char *prefix,
5946                                      const char *transaction_prefix,
5947                                      struct binder_work *w)
5948 {
5949         struct binder_node *node;
5950         struct binder_transaction *t;
5951
5952         switch (w->type) {
5953         case BINDER_WORK_TRANSACTION:
5954                 t = container_of(w, struct binder_transaction, work);
5955                 print_binder_transaction_ilocked(
5956                                 m, proc, transaction_prefix, t);
5957                 break;
5958         case BINDER_WORK_RETURN_ERROR: {
5959                 struct binder_error *e = container_of(
5960                                 w, struct binder_error, work);
5961
5962                 seq_printf(m, "%stransaction error: %u\n",
5963                            prefix, e->cmd);
5964         } break;
5965         case BINDER_WORK_TRANSACTION_COMPLETE:
5966                 seq_printf(m, "%stransaction complete\n", prefix);
5967                 break;
5968         case BINDER_WORK_NODE:
5969                 node = container_of(w, struct binder_node, work);
5970                 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5971                            prefix, node->debug_id,
5972                            (u64)node->ptr, (u64)node->cookie);
5973                 break;
5974         case BINDER_WORK_DEAD_BINDER:
5975                 seq_printf(m, "%shas dead binder\n", prefix);
5976                 break;
5977         case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5978                 seq_printf(m, "%shas cleared dead binder\n", prefix);
5979                 break;
5980         case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5981                 seq_printf(m, "%shas cleared death notification\n", prefix);
5982                 break;
5983         default:
5984                 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5985                 break;
5986         }
5987 }
5988
5989 static void print_binder_thread_ilocked(struct seq_file *m,
5990                                         struct binder_thread *thread,
5991                                         int print_always)
5992 {
5993         struct binder_transaction *t;
5994         struct binder_work *w;
5995         size_t start_pos = m->count;
5996         size_t header_pos;
5997
5998         seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
5999                         thread->pid, thread->looper,
6000                         thread->looper_need_return,
6001                         atomic_read(&thread->tmp_ref));
6002         header_pos = m->count;
6003         t = thread->transaction_stack;
6004         while (t) {
6005                 if (t->from == thread) {
6006                         print_binder_transaction_ilocked(m, thread->proc,
6007                                         "    outgoing transaction", t);
6008                         t = t->from_parent;
6009                 } else if (t->to_thread == thread) {
6010                         print_binder_transaction_ilocked(m, thread->proc,
6011                                                  "    incoming transaction", t);
6012                         t = t->to_parent;
6013                 } else {
6014                         print_binder_transaction_ilocked(m, thread->proc,
6015                                         "    bad transaction", t);
6016                         t = NULL;
6017                 }
6018         }
6019         list_for_each_entry(w, &thread->todo, entry) {
6020                 print_binder_work_ilocked(m, thread->proc, "    ",
6021                                           "    pending transaction", w);
6022         }
6023         if (!print_always && m->count == header_pos)
6024                 m->count = start_pos;
6025 }
6026
6027 static void print_binder_node_nilocked(struct seq_file *m,
6028                                        struct binder_node *node)
6029 {
6030         struct binder_ref *ref;
6031         struct binder_work *w;
6032         int count;
6033
6034         count = 0;
6035         hlist_for_each_entry(ref, &node->refs, node_entry)
6036                 count++;
6037
6038         seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6039                    node->debug_id, (u64)node->ptr, (u64)node->cookie,
6040                    node->has_strong_ref, node->has_weak_ref,
6041                    node->local_strong_refs, node->local_weak_refs,
6042                    node->internal_strong_refs, count, node->tmp_refs);
6043         if (count) {
6044                 seq_puts(m, " proc");
6045                 hlist_for_each_entry(ref, &node->refs, node_entry)
6046                         seq_printf(m, " %d", ref->proc->pid);
6047         }
6048         seq_puts(m, "\n");
6049         if (node->proc) {
6050                 list_for_each_entry(w, &node->async_todo, entry)
6051                         print_binder_work_ilocked(m, node->proc, "    ",
6052                                           "    pending async transaction", w);
6053         }
6054 }
6055
6056 static void print_binder_ref_olocked(struct seq_file *m,
6057                                      struct binder_ref *ref)
6058 {
6059         binder_node_lock(ref->node);
6060         seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
6061                    ref->data.debug_id, ref->data.desc,
6062                    ref->node->proc ? "" : "dead ",
6063                    ref->node->debug_id, ref->data.strong,
6064                    ref->data.weak, ref->death);
6065         binder_node_unlock(ref->node);
6066 }
6067
6068 static void print_binder_proc(struct seq_file *m,
6069                               struct binder_proc *proc, int print_all)
6070 {
6071         struct binder_work *w;
6072         struct rb_node *n;
6073         size_t start_pos = m->count;
6074         size_t header_pos;
6075         struct binder_node *last_node = NULL;
6076
6077         seq_printf(m, "proc %d\n", proc->pid);
6078         seq_printf(m, "context %s\n", proc->context->name);
6079         header_pos = m->count;
6080
6081         binder_inner_proc_lock(proc);
6082         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6083                 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6084                                                 rb_node), print_all);
6085
6086         for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
6087                 struct binder_node *node = rb_entry(n, struct binder_node,
6088                                                     rb_node);
6089                 if (!print_all && !node->has_async_transaction)
6090                         continue;
6091
6092                 /*
6093                  * take a temporary reference on the node so it
6094                  * survives and isn't removed from the tree
6095                  * while we print it.
6096                  */
6097                 binder_inc_node_tmpref_ilocked(node);
6098                 /* Need to drop inner lock to take node lock */
6099                 binder_inner_proc_unlock(proc);
6100                 if (last_node)
6101                         binder_put_node(last_node);
6102                 binder_node_inner_lock(node);
6103                 print_binder_node_nilocked(m, node);
6104                 binder_node_inner_unlock(node);
6105                 last_node = node;
6106                 binder_inner_proc_lock(proc);
6107         }
6108         binder_inner_proc_unlock(proc);
6109         if (last_node)
6110                 binder_put_node(last_node);
6111
6112         if (print_all) {
6113                 binder_proc_lock(proc);
6114                 for (n = rb_first(&proc->refs_by_desc);
6115                      n != NULL;
6116                      n = rb_next(n))
6117                         print_binder_ref_olocked(m, rb_entry(n,
6118                                                             struct binder_ref,
6119                                                             rb_node_desc));
6120                 binder_proc_unlock(proc);
6121         }
6122         binder_alloc_print_allocated(m, &proc->alloc);
6123         binder_inner_proc_lock(proc);
6124         list_for_each_entry(w, &proc->todo, entry)
6125                 print_binder_work_ilocked(m, proc, "  ",
6126                                           "  pending transaction", w);
6127         list_for_each_entry(w, &proc->delivered_death, entry) {
6128                 seq_puts(m, "  has delivered dead binder\n");
6129                 break;
6130         }
6131         binder_inner_proc_unlock(proc);
6132         if (!print_all && m->count == header_pos)
6133                 m->count = start_pos;
6134 }
6135
6136 static const char * const binder_return_strings[] = {
6137         "BR_ERROR",
6138         "BR_OK",
6139         "BR_TRANSACTION",
6140         "BR_REPLY",
6141         "BR_ACQUIRE_RESULT",
6142         "BR_DEAD_REPLY",
6143         "BR_TRANSACTION_COMPLETE",
6144         "BR_INCREFS",
6145         "BR_ACQUIRE",
6146         "BR_RELEASE",
6147         "BR_DECREFS",
6148         "BR_ATTEMPT_ACQUIRE",
6149         "BR_NOOP",
6150         "BR_SPAWN_LOOPER",
6151         "BR_FINISHED",
6152         "BR_DEAD_BINDER",
6153         "BR_CLEAR_DEATH_NOTIFICATION_DONE",
6154         "BR_FAILED_REPLY"
6155 };
6156
6157 static const char * const binder_command_strings[] = {
6158         "BC_TRANSACTION",
6159         "BC_REPLY",
6160         "BC_ACQUIRE_RESULT",
6161         "BC_FREE_BUFFER",
6162         "BC_INCREFS",
6163         "BC_ACQUIRE",
6164         "BC_RELEASE",
6165         "BC_DECREFS",
6166         "BC_INCREFS_DONE",
6167         "BC_ACQUIRE_DONE",
6168         "BC_ATTEMPT_ACQUIRE",
6169         "BC_REGISTER_LOOPER",
6170         "BC_ENTER_LOOPER",
6171         "BC_EXIT_LOOPER",
6172         "BC_REQUEST_DEATH_NOTIFICATION",
6173         "BC_CLEAR_DEATH_NOTIFICATION",
6174         "BC_DEAD_BINDER_DONE",
6175         "BC_TRANSACTION_SG",
6176         "BC_REPLY_SG",
6177 };
6178
6179 static const char * const binder_objstat_strings[] = {
6180         "proc",
6181         "thread",
6182         "node",
6183         "ref",
6184         "death",
6185         "transaction",
6186         "transaction_complete"
6187 };
6188
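/*
 * Dump the command/return/object counters from @stats, emitting one
 * "<name>: <count>" line per nonzero counter.  Object types additionally
 * report "active" (created - deleted) and "total" (created).  @prefix is
 * prepended to every line, so the same helper serves both the global and the
 * per-process stats output.
 */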
6189 static void print_binder_stats(struct seq_file *m, const char *prefix,
6190                                struct binder_stats *stats)
6191 {
6192         int i;
6193
6194         BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6195                      ARRAY_SIZE(binder_command_strings));
6196         for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6197                 int temp = atomic_read(&stats->bc[i]);
6198
6199                 if (temp)
6200                         seq_printf(m, "%s%s: %d\n", prefix,
6201                                    binder_command_strings[i], temp);
6202         }
6203
6204         BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6205                      ARRAY_SIZE(binder_return_strings));
6206         for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6207                 int temp = atomic_read(&stats->br[i]);
6208
6209                 if (temp)
6210                         seq_printf(m, "%s%s: %d\n", prefix,
6211                                    binder_return_strings[i], temp);
6212         }
6213
6214         BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6215                      ARRAY_SIZE(binder_objstat_strings));
6216         BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6217                      ARRAY_SIZE(stats->obj_deleted));
6218         for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6219                 int created = atomic_read(&stats->obj_created[i]);
6220                 int deleted = atomic_read(&stats->obj_deleted[i]);
6221
6222                 if (created || deleted)
6223                         seq_printf(m, "%s%s: active %d total %d\n",
6224                                 prefix,
6225                                 binder_objstat_strings[i],
6226                                 created - deleted,
6227                                 created);
6228         }
6229 }
6230
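/*
 * Print the per-process summary shown in the "stats" debugfs output: thread
 * and ready-thread counts, requested vs. started vs. max threads, free async
 * space, node/ref/buffer counts, pending transactions on proc->todo and,
 * finally, this process's binder_stats counters.  Locks are held only around
 * the rbtree and list walks that need them.
 */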
6231 static void print_binder_proc_stats(struct seq_file *m,
6232                                     struct binder_proc *proc)
6233 {
6234         struct binder_work *w;
6235         struct binder_thread *thread;
6236         struct rb_node *n;
6237         int count, strong, weak, ready_threads;
6238         size_t free_async_space =
6239                 binder_alloc_get_free_async_space(&proc->alloc);
6240
6241         seq_printf(m, "proc %d\n", proc->pid);
6242         seq_printf(m, "context %s\n", proc->context->name);
6243         count = 0;
6244         ready_threads = 0;
6245         binder_inner_proc_lock(proc);
6246         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6247                 count++;
6248
6249         list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6250                 ready_threads++;
6251
6252         seq_printf(m, "  threads: %d\n", count);
6253         seq_printf(m, "  requested threads: %d+%d/%d\n"
6254                         "  ready threads %d\n"
6255                         "  free async space %zd\n", proc->requested_threads,
6256                         proc->requested_threads_started, proc->max_threads,
6257                         ready_threads,
6258                         free_async_space);
6259         count = 0;
6260         for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
6261                 count++;
6262         binder_inner_proc_unlock(proc);
6263         seq_printf(m, "  nodes: %d\n", count);
6264         count = 0;
6265         strong = 0;
6266         weak = 0;
6267         binder_proc_lock(proc);
6268         for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
6269                 struct binder_ref *ref = rb_entry(n, struct binder_ref,
6270                                                   rb_node_desc);
6271                 count++;
6272                 strong += ref->data.strong;
6273                 weak += ref->data.weak;
6274         }
6275         binder_proc_unlock(proc);
6276         seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
6277
6278         count = binder_alloc_get_allocated_count(&proc->alloc);
6279         seq_printf(m, "  buffers: %d\n", count);
6280
6281         binder_alloc_print_pages(m, &proc->alloc);
6282
6283         count = 0;
6284         binder_inner_proc_lock(proc);
6285         list_for_each_entry(w, &proc->todo, entry) {
6286                 if (w->type == BINDER_WORK_TRANSACTION)
6287                         count++;
6288         }
6289         binder_inner_proc_unlock(proc);
6290         seq_printf(m, "  pending transactions: %d\n", count);
6291
6292         print_binder_stats(m, "  ", &proc->stats);
6293 }
6294
6295
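/*
 * Back end for the "state" debugfs file (typically
 * /sys/kernel/debug/binder/state): print any nodes left on the global
 * binder_dead_nodes list, then the full state of every binder_proc.  Each
 * dead node gets a temporary reference (tmp_refs) so it cannot disappear
 * while binder_dead_nodes_lock is dropped for printing.
 */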
6296 int binder_state_show(struct seq_file *m, void *unused)
6297 {
6298         struct binder_proc *proc;
6299         struct binder_node *node;
6300         struct binder_node *last_node = NULL;
6301
6302         seq_puts(m, "binder state:\n");
6303
6304         spin_lock(&binder_dead_nodes_lock);
6305         if (!hlist_empty(&binder_dead_nodes))
6306                 seq_puts(m, "dead nodes:\n");
6307         hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
6308                 /*
6309                  * take a temporary reference on the node so it
6310                  * survives and isn't removed from the list
6311                  * while we print it.
6312                  */
6313                 node->tmp_refs++;
6314                 spin_unlock(&binder_dead_nodes_lock);
6315                 if (last_node)
6316                         binder_put_node(last_node);
6317                 binder_node_lock(node);
6318                 print_binder_node_nilocked(m, node);
6319                 binder_node_unlock(node);
6320                 last_node = node;
6321                 spin_lock(&binder_dead_nodes_lock);
6322         }
6323         spin_unlock(&binder_dead_nodes_lock);
6324         if (last_node)
6325                 binder_put_node(last_node);
6326
6327         mutex_lock(&binder_procs_lock);
6328         hlist_for_each_entry(proc, &binder_procs, proc_node)
6329                 print_binder_proc(m, proc, 1);
6330         mutex_unlock(&binder_procs_lock);
6331
6332         return 0;
6333 }
6334
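/*
 * Back end for the "stats" debugfs file: the global binder_stats counters
 * first, then one print_binder_proc_stats() block per registered process.
 * The per-command lines look like the following (values illustrative only):
 *
 *   BC_TRANSACTION: 42
 *   BR_REPLY: 41
 */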
6335 int binder_stats_show(struct seq_file *m, void *unused)
6336 {
6337         struct binder_proc *proc;
6338
6339         seq_puts(m, "binder stats:\n");
6340
6341         print_binder_stats(m, "", &binder_stats);
6342
6343         mutex_lock(&binder_procs_lock);
6344         hlist_for_each_entry(proc, &binder_procs, proc_node)
6345                 print_binder_proc_stats(m, proc);
6346         mutex_unlock(&binder_procs_lock);
6347
6348         return 0;
6349 }
6350
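/*
 * Back end for the "transactions" debugfs file: like "state", but
 * print_binder_proc() is called with print_all == 0, so nodes without
 * pending async work and the per-process ref table are skipped, and the
 * per-process header is rewound again when nothing else was printed.
 */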
6351 int binder_transactions_show(struct seq_file *m, void *unused)
6352 {
6353         struct binder_proc *proc;
6354
6355         seq_puts(m, "binder transactions:\n");
6356         mutex_lock(&binder_procs_lock);
6357         hlist_for_each_entry(proc, &binder_procs, proc_node)
6358                 print_binder_proc(m, proc, 0);
6359         mutex_unlock(&binder_procs_lock);
6360
6361         return 0;
6362 }
6363
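/*
 * Show the full state of the binder_proc whose pid matches the pid stashed
 * in m->private; backs the per-process entries created under the "proc"
 * debugfs directory set up in binder_init().
 */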
6364 static int proc_show(struct seq_file *m, void *unused)
6365 {
6366         struct binder_proc *itr;
6367         int pid = (unsigned long)m->private;
6368
6369         mutex_lock(&binder_procs_lock);
6370         hlist_for_each_entry(itr, &binder_procs, proc_node) {
6371                 if (itr->pid == pid) {
6372                         seq_puts(m, "binder proc state:\n");
6373                         print_binder_proc(m, itr, 1);
6374                 }
6375         }
6376         mutex_unlock(&binder_procs_lock);
6377
6378         return 0;
6379 }
6380
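/*
 * Print one transaction log entry.  The entry may be overwritten
 * concurrently, so debug_id_done is sampled before and after the fields are
 * printed, with read barriers in between; if it is still zero or has changed,
 * the line is flagged as " (incomplete)".
 */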
6381 static void print_binder_transaction_log_entry(struct seq_file *m,
6382                                         struct binder_transaction_log_entry *e)
6383 {
6384         int debug_id = READ_ONCE(e->debug_id_done);
6385         /*
6386          * read barrier to guarantee debug_id_done is read before
6387          * we print the log values
6388          */
6389         smp_rmb();
6390         seq_printf(m,
6391                    "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6392                    e->debug_id, (e->call_type == 2) ? "reply" :
6393                    ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6394                    e->from_thread, e->to_proc, e->to_thread, e->context_name,
6395                    e->to_node, e->target_handle, e->data_size, e->offsets_size,
6396                    e->return_error, e->return_error_param,
6397                    e->return_error_line);
6398         /*
6399          * read barrier to guarantee the read of debug_id_done happens
6400          * after we are done printing the fields of the entry
6401          */
6402         smp_rmb();
6403         seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6404                         "\n" : " (incomplete)\n");
6405 }
6406
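/*
 * Back end for the "transaction_log" and "failed_transaction_log" debugfs
 * files (m->private selects which log).  log->cur holds the index of the
 * most recently used slot; once the ring has wrapped (log->full), all
 * ARRAY_SIZE(log->entry) slots are printed starting from the oldest one.
 */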
6407 int binder_transaction_log_show(struct seq_file *m, void *unused)
6408 {
6409         struct binder_transaction_log *log = m->private;
6410         unsigned int log_cur = atomic_read(&log->cur);
6411         unsigned int count;
6412         unsigned int cur;
6413         int i;
6414
6415         count = log_cur + 1;
6416         cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6417                 0 : count % ARRAY_SIZE(log->entry);
6418         if (count > ARRAY_SIZE(log->entry) || log->full)
6419                 count = ARRAY_SIZE(log->entry);
6420         for (i = 0; i < count; i++) {
6421                 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6422
6423                 print_binder_transaction_log_entry(m, &log->entry[index]);
6424         }
6425         return 0;
6426 }
6427
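/*
 * File operations for the binder character devices.  Deliberately not
 * static: init_binder_device() below wires it into each miscdevice, and it
 * is also shared with binderfs via binder_internal.h.
 */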
6428 const struct file_operations binder_fops = {
6429         .owner = THIS_MODULE,
6430         .poll = binder_poll,
6431         .unlocked_ioctl = binder_ioctl,
6432         .compat_ioctl = compat_ptr_ioctl,
6433         .mmap = binder_mmap,
6434         .open = binder_open,
6435         .flush = binder_flush,
6436         .release = binder_release,
6437         .may_pollfree = true,
6438 };
6439
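/*
 * Allocate and register one binder misc character device called @name
 * (e.g. "binder"), give it its own binder context, and link it into the
 * global binder_devices list.  Called from binder_init() once per
 * comma-separated name in binder_devices_param when binderfs is not in use.
 */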
6440 static int __init init_binder_device(const char *name)
6441 {
6442         int ret;
6443         struct binder_device *binder_device;
6444
6445         binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6446         if (!binder_device)
6447                 return -ENOMEM;
6448
6449         binder_device->miscdev.fops = &binder_fops;
6450         binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6451         binder_device->miscdev.name = name;
6452
6453         refcount_set(&binder_device->ref, 1);
6454         binder_device->context.binder_context_mgr_uid = INVALID_UID;
6455         binder_device->context.name = name;
6456         mutex_init(&binder_device->context.context_mgr_node_lock);
6457
6458         ret = misc_register(&binder_device->miscdev);
6459         if (ret < 0) {
6460                 kfree(binder_device);
6461                 return ret;
6462         }
6463
6464         hlist_add_head(&binder_device->hlist, &binder_devices);
6465
6466         return ret;
6467 }
6468
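/*
 * One-time driver initialization (device_initcall): set up the binder_alloc
 * shrinker, reset the transaction log cursors, create the debugfs hierarchy,
 * register one device per comma-separated name in binder_devices_param
 * (only when binderfs is disabled) and finally initialize binderfs.  On
 * failure, everything registered so far is torn down again.
 */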
6469 static int __init binder_init(void)
6470 {
6471         int ret;
6472         char *device_name, *device_tmp;
6473         struct binder_device *device;
6474         struct hlist_node *tmp;
6475         char *device_names = NULL;
6476
6477         ret = binder_alloc_shrinker_init();
6478         if (ret)
6479                 return ret;
6480
6481         atomic_set(&binder_transaction_log.cur, ~0U);
6482         atomic_set(&binder_transaction_log_failed.cur, ~0U);
6483
6484         binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6485         if (binder_debugfs_dir_entry_root)
6486                 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6487                                                  binder_debugfs_dir_entry_root);
6488
6489         if (binder_debugfs_dir_entry_root) {
6490                 debugfs_create_file("state",
6491                                     0444,
6492                                     binder_debugfs_dir_entry_root,
6493                                     NULL,
6494                                     &binder_state_fops);
6495                 debugfs_create_file("stats",
6496                                     0444,
6497                                     binder_debugfs_dir_entry_root,
6498                                     NULL,
6499                                     &binder_stats_fops);
6500                 debugfs_create_file("transactions",
6501                                     0444,
6502                                     binder_debugfs_dir_entry_root,
6503                                     NULL,
6504                                     &binder_transactions_fops);
6505                 debugfs_create_file("transaction_log",
6506                                     0444,
6507                                     binder_debugfs_dir_entry_root,
6508                                     &binder_transaction_log,
6509                                     &binder_transaction_log_fops);
6510                 debugfs_create_file("failed_transaction_log",
6511                                     0444,
6512                                     binder_debugfs_dir_entry_root,
6513                                     &binder_transaction_log_failed,
6514                                     &binder_transaction_log_fops);
6515         }
6516
6517         if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6518             strcmp(binder_devices_param, "") != 0) {
6519                 /*
6520                  * Copy the module parameter string, because we don't want to
6521                  * tokenize it in-place.
6522                  */
6523                 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6524                 if (!device_names) {
6525                         ret = -ENOMEM;
6526                         goto err_alloc_device_names_failed;
6527                 }
6528
6529                 device_tmp = device_names;
6530                 while ((device_name = strsep(&device_tmp, ","))) {
6531                         ret = init_binder_device(device_name);
6532                         if (ret)
6533                                 goto err_init_binder_device_failed;
6534                 }
6535         }
6536
6537         ret = init_binderfs();
6538         if (ret)
6539                 goto err_init_binder_device_failed;
6540
6541         return ret;
6542
6543 err_init_binder_device_failed:
6544         hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6545                 misc_deregister(&device->miscdev);
6546                 hlist_del(&device->hlist);
6547                 kfree(device);
6548         }
6549
6550         kfree(device_names);
6551
6552 err_alloc_device_names_failed:
6553         debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6554         binder_alloc_shrinker_exit();
6555
6556         return ret;
6557 }
6558
6559 device_initcall(binder_init);
6560
6561 #define CREATE_TRACE_POINTS
6562 #include "binder_trace.h"
6563
6564 MODULE_LICENSE("GPL v2");