// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel.
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack.
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel.
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate the required
 * lock in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
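/*
 * Illustrative sketch (not part of the driver): a caller that needs both
 * a node's lock and the owning proc's inner lock takes them in the order
 * above via the helpers defined later in this file, e.g.:
 *
 *	binder_node_inner_lock(node);	// node->lock, then proc->inner_lock
 *	...update node state and proc/thread todo lists...
 *	binder_node_inner_unlock(node);
 */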
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>

#include <uapi/linux/android/binder.h>
#include <uapi/linux/android/binderfs.h>

#include <asm/cacheflush.h>

#include "binder_alloc.h"
#include "binder_internal.h"
#include "binder_trace.h"
static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);
static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info_ratelimited(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)
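/*
 * Usage sketch (illustrative only): a driver-internal debug print gated
 * on a mask bit, and a user-error report that can trip stop_on_user_error:
 *
 *	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%d: open\n", current->pid);
 *	binder_user_error("%d: bad handle\n", current->pid);
 *
 * Both expand to pr_info_ratelimited(), so log flooding is bounded.
 */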
#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)
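/*
 * Illustrative sketch: given a validated object header, these
 * container_of() wrappers recover the enclosing typed object, e.g.:
 *
 *	struct binder_object_header *hdr = &object->hdr;
 *
 *	if (hdr->type == BINDER_TYPE_FD) {
 *		struct binder_fd_object *fdo = to_binder_fd_object(hdr);
 *		...
 *	}
 */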
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}
struct binder_transaction_log binder_transaction_log;
struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
/**
 * struct binder_work - work enqueued on a worklist
 * @entry:	node enqueued on list
 * @type:	type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum binder_work_type {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};
/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @txn_security_ctx:     require sender's security context
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 accept_fds:1;
		u8 txn_security_ctx:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};
struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};
/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};
/**
 * struct binder_ref - struct to track references on nodes
 * @data:         binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:   list entry for node->refs list in target node
 *                (protected by @node->lock)
 * @proc:         binder_proc containing ref
 * @node:         binder_node of target node. When cleaning up a
 *                ref for deletion in binder_cleanup_ref, a non-NULL
 *                @node indicates the node must be freed
 * @death:        pointer to death notification (ref_death) if requested
 *                (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};
enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH = 0x01,
	BINDER_DEFERRED_RELEASE = 0x02,
};
/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @cred:                 struct cred associated with the `struct file`
 *                        in binder_open()
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 * @binderfs_entry:       process-specific binderfs log file
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	const struct cred *cred;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
	struct dentry *binderfs_entry;
};
enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};
/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};
/**
 * struct binder_txn_fd_fixup - transaction fd fixup list element
 * @fixup_entry:          list entry
 * @file:                 struct file to be associated with new fd
 * @offset:               offset in buffer data to this fixup
 *
 * List element for fd fixups in a transaction. Since file
 * descriptors need to be allocated in the context of the
 * target process, we pass each fd to be processed in this
 * struct.
 */
struct binder_txn_fd_fixup {
	struct list_head fixup_entry;
	struct file *file;
	size_t offset;
};
struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
	struct list_head fd_fixups;
	binder_uintptr_t security_ctx;
	/**
	 * @lock: protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};
/**
 * struct binder_object - union of flat binder object types
 * @hdr:   generic object header
 * @fbo:   binder object (nodes and refs)
 * @fdo:   file descriptor object
 * @bbo:   binder buffer pointer
 * @fdao:  file descriptor array
 *
 * Used for type-independent object copies
 */
struct binder_object {
	union {
		struct binder_object_header hdr;
		struct flat_binder_object fbo;
		struct binder_fd_object fdo;
		struct binder_buffer_object bbo;
		struct binder_fd_array_object fdao;
	};
};
/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}
/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}
/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}
/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}
/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}
/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}
/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}
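/*
 * Illustrative contrast (sketch, not driver code): the deferred variant
 * queues work without setting process_todo, so a sleeping looper is not
 * forced to handle it on its next read:
 *
 *	binder_enqueue_deferred_thread_work_ilocked(thread, &node->work);
 *
 * whereas binder_enqueue_thread_work(thread, work) both queues the work
 * and marks the thread's todo list as ready for processing.
 */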
static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}
static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}
static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}
/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}
/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed, using %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);

	return node;
}
static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}
static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}
static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}
static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}
/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
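/*
 * Typical tmpref usage pattern seen throughout this file (sketch):
 *
 *	node = binder_get_node(proc, ptr);	// takes a tmp ref
 *	if (node) {
 *		...use node safely...
 *		binder_put_node(node);		// may free a dead node
 *	}
 */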
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}
/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
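/*
 * Descriptor numbering sketch (illustrative): handle 0 is reserved for
 * the context manager node; for any other node the ascending scan above
 * yields the lowest free descriptor >= 1. E.g. with descs {0, 1, 3} in
 * use, a new ref is assigned desc 2.
 */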
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}
/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}
/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}
/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	if (ret && ref == new_ref) {
		/*
		 * Cleanup the failed reference here as the target
		 * could now be dead and have already released its
		 * references by now. Calling on the new reference
		 * with strong=0 and a tmp_refs will not decrement
		 * the node. The new_ref gets kfree'd below.
		 */
		binder_cleanup_ref_olocked(new_ref);
		ref = NULL;
	}

	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);

	return ret;
}
static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}
/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
	    !proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}
/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}
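/*
 * Caller pattern (sketch, mirroring the kerneldoc above and the use in
 * binder_send_failed_reply() below):
 *
 *	target_thread = binder_get_txn_from_and_acq_inner(t);
 *	if (target_thread) {
 *		...operate on target_thread under its proc inner lock...
 *		binder_inner_proc_unlock(target_thread->proc);
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 */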
/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	binder transaction for t->from
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}

static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}
/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "undelivered transaction %d, %s\n",
			     t->debug_id, reason);
		binder_free_transaction(t);
	}
}
/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc:	binder_proc owning the buffer
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the @buffer at which to validate an object.
 * @object:	struct binder_object to read into
 *
 * Return:	If there's a valid metadata object at @offset in @buffer, the
 *		size of that object. Otherwise, it returns zero. The object
 *		is read into the struct binder_object pointed to by @object.
 */
static size_t binder_get_object(struct binder_proc *proc,
				struct binder_buffer *buffer,
				unsigned long offset,
				struct binder_object *object)
{
	size_t read_size;
	struct binder_object_header *hdr;
	size_t object_size = 0;

	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
	    binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
					  offset, read_size))
		return 0;

	/* Ok, now see if we read a complete object. */
	hdr = &object->hdr;
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @proc:	binder_proc owning the buffer
 * @b:		binder_buffer containing the object
 * @object:	struct binder_object to read into
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start_offset: points to the start of the offset array
 * @object_offsetp: offset of @object read from @b
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 *		If @object_offsetp is non-NULL, then the offset within
 *		@b is written to it.
 */
static struct binder_buffer_object *binder_validate_ptr(
						struct binder_proc *proc,
						struct binder_buffer *b,
						struct binder_object *object,
						binder_size_t index,
						binder_size_t start_offset,
						binder_size_t *object_offsetp,
						binder_size_t num_valid)
{
	size_t object_size;
	binder_size_t object_offset;
	unsigned long buffer_offset;

	if (index >= num_valid)
		return NULL;

	buffer_offset = start_offset + sizeof(binder_size_t) * index;
	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
					  b, buffer_offset,
					  sizeof(object_offset)))
		return NULL;
	object_size = binder_get_object(proc, b, object_offset, object);
	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
		return NULL;
	if (object_offsetp)
		*object_offsetp = object_offset;

	return &object->bbo;
}
2115 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2116 * @proc: binder_proc owning the buffer
2117 * @b: transaction buffer
2118 * @objects_start_offset: offset to start of objects buffer
2119 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up
2120 * @fixup_offset: start offset in @buffer to fix up
2121 * @last_obj_offset: offset to last binder_buffer_object that we fixed
2122 * @last_min_offset: minimum fixup offset in object at @last_obj_offset
2124 * Return: %true if a fixup in buffer @b at offset @fixup_offset is allowed.
2127 * For safety reasons, we only allow fixups inside a buffer to happen
2128 * at increasing offsets; additionally, we only allow fixup on the last
2129 * buffer object that was verified, or one of its parents.
2131 * Example of what is allowed:
2134 * B (parent = A, offset = 0)
2135 * C (parent = A, offset = 16)
2136 * D (parent = C, offset = 0)
2137 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2139 * Examples of what is not allowed:
2141 * Decreasing offsets within the same parent:
2143 * C (parent = A, offset = 16)
2144 * B (parent = A, offset = 0) // decreasing offset within A
2146 * Referring to a parent that wasn't the last object or any of its parents:
2148 * B (parent = A, offset = 0)
2149 * C (parent = A, offset = 0)
2150 * C (parent = A, offset = 16)
2151 * D (parent = B, offset = 0) // B is not A or any of A's parents
2153 static bool binder_validate_fixup(struct binder_proc *proc,
2154 struct binder_buffer *b,
2155 binder_size_t objects_start_offset,
2156 binder_size_t buffer_obj_offset,
2157 binder_size_t fixup_offset,
2158 binder_size_t last_obj_offset,
2159 binder_size_t last_min_offset)
2161 if (!last_obj_offset) {
2162 /* No previously verified object to fix up in */
2166 while (last_obj_offset != buffer_obj_offset) {
2167 unsigned long buffer_offset;
2168 struct binder_object last_object;
2169 struct binder_buffer_object *last_bbo;
2170 size_t object_size = binder_get_object(proc, b, last_obj_offset,
2172 if (object_size != sizeof(*last_bbo))
2175 last_bbo = &last_object.bbo;
2177 * Safe to retrieve the parent of the last object, since it
2178 * was previously verified by the driver.
2180 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2182 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
2183 buffer_offset = objects_start_offset +
2184 sizeof(binder_size_t) * last_bbo->parent;
2185 if (binder_alloc_copy_from_buffer(&proc->alloc,
2188 sizeof(last_obj_offset)))
2191 return (fixup_offset >= last_min_offset);
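/*
 * Editorial note on the walk above: starting from the last verified
 * buffer object, the loop follows parent links until it reaches the
 * object being fixed up; every hop raises the floor to just past the
 * parent pointer slot already patched in that ancestor
 * (parent_offset + sizeof(uintptr_t)), and the final check requires
 * the new fixup to land at or above that floor, which is exactly the
 * "increasing offsets, last object or its parents" rule stated above.
 */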
2195 * struct binder_task_work_cb - for deferred close
2197 * @twork: callback_head for task work
2200 * Structure to pass task work to be handled after
2201 * returning from binder_ioctl() via task_work_add().
2203 struct binder_task_work_cb {
2204 struct callback_head twork;
2209 * binder_do_fd_close() - close list of file descriptors
2210 * @twork: callback head for task work
2212 * It is not safe to call ksys_close() during the binder_ioctl()
2213 * function if there is a chance that binder's own file descriptor
2214 * might be closed. This is to meet the requirements for using
2215 * fdget() (see comments for __fget_light()). Therefore use
2216 * task_work_add() to schedule the close operation once we have
2217 * returned from binder_ioctl(). This function is a callback
2218 * for that mechanism and does the actual ksys_close() on the
2219 * given file descriptor.
2221 static void binder_do_fd_close(struct callback_head *twork)
2223 struct binder_task_work_cb *twcb = container_of(twork,
2224 struct binder_task_work_cb, twork);
2231 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2232 * @fd: file-descriptor to close
2234 * See comments in binder_do_fd_close(). This function is used to schedule
2235 * a file-descriptor to be closed after returning from binder_ioctl().
2237 static void binder_deferred_fd_close(int fd)
2239 struct binder_task_work_cb *twcb;
2241 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2244 init_task_work(&twcb->twork, binder_do_fd_close);
2245 __close_fd_get_file(fd, &twcb->file);
if (twcb->file) {
2247 filp_close(twcb->file, current->files);
2248 task_work_add(current, &twcb->twork, TWA_RESUME);
} else {
kfree(twcb);
}
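/*
 * Editorial note: the descriptor is detached from the fd table and
 * flushed immediately (__close_fd_get_file() + filp_close()), so the
 * fd number becomes reusable right away; only the final fput() is
 * deferred to binder_do_fd_close() via task work, which runs after
 * the current ioctl has returned to user space.
 */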
2254 static void binder_transaction_buffer_release(struct binder_proc *proc,
2255 struct binder_thread *thread,
2256 struct binder_buffer *buffer,
2257 binder_size_t failed_at,
2260 int debug_id = buffer->debug_id;
2261 binder_size_t off_start_offset, buffer_offset, off_end_offset;
2263 binder_debug(BINDER_DEBUG_TRANSACTION,
2264 "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2265 proc->pid, buffer->debug_id,
2266 buffer->data_size, buffer->offsets_size,
2267 (unsigned long long)failed_at);
2269 if (buffer->target_node)
2270 binder_dec_node(buffer->target_node, 1, 0);
2272 off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2273 off_end_offset = is_failure && failed_at ? failed_at :
2274 off_start_offset + buffer->offsets_size;
2275 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2276 buffer_offset += sizeof(binder_size_t)) {
2277 struct binder_object_header *hdr;
2278 size_t object_size = 0;
2279 struct binder_object object;
2280 binder_size_t object_offset;
2282 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2283 buffer, buffer_offset,
2284 sizeof(object_offset)))
2285 object_size = binder_get_object(proc, buffer,
2286 object_offset, &object);
2287 if (object_size == 0) {
2288 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2289 debug_id, (u64)object_offset, buffer->data_size);
2293 switch (hdr->type) {
2294 case BINDER_TYPE_BINDER:
2295 case BINDER_TYPE_WEAK_BINDER: {
2296 struct flat_binder_object *fp;
2297 struct binder_node *node;
2299 fp = to_flat_binder_object(hdr);
2300 node = binder_get_node(proc, fp->binder);
2302 pr_err("transaction release %d bad node %016llx\n",
2303 debug_id, (u64)fp->binder);
2306 binder_debug(BINDER_DEBUG_TRANSACTION,
2307 " node %d u%016llx\n",
2308 node->debug_id, (u64)node->ptr);
2309 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2311 binder_put_node(node);
2313 case BINDER_TYPE_HANDLE:
2314 case BINDER_TYPE_WEAK_HANDLE: {
2315 struct flat_binder_object *fp;
2316 struct binder_ref_data rdata;
2319 fp = to_flat_binder_object(hdr);
2320 ret = binder_dec_ref_for_handle(proc, fp->handle,
2321 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2324 pr_err("transaction release %d bad handle %d, ret = %d\n",
2325 debug_id, fp->handle, ret);
2328 binder_debug(BINDER_DEBUG_TRANSACTION,
2329 " ref %d desc %d\n",
2330 rdata.debug_id, rdata.desc);
2333 case BINDER_TYPE_FD: {
2335 * No need to close the file here since user-space
2336 * closes it for successfully delivered
2337 * transactions. For transactions that weren't
2338 * delivered, the new fd was never allocated so
2339 * there is no need to close and the fput on the
2340 * file is done when the transaction is torn down.
2344 case BINDER_TYPE_PTR:
2346 * Nothing to do here, this will get cleaned up when the
2347 * transaction buffer gets freed
2350 case BINDER_TYPE_FDA: {
2351 struct binder_fd_array_object *fda;
2352 struct binder_buffer_object *parent;
2353 struct binder_object ptr_object;
2354 binder_size_t fda_offset;
2356 binder_size_t fd_buf_size;
2357 binder_size_t num_valid;
2361 * The fd fixups have not been applied so no
2362 * fds need to be closed.
2367 num_valid = (buffer_offset - off_start_offset) /
2368 sizeof(binder_size_t);
2369 fda = to_binder_fd_array_object(hdr);
2370 parent = binder_validate_ptr(proc, buffer, &ptr_object,
2376 pr_err("transaction release %d bad parent offset\n",
2380 fd_buf_size = sizeof(u32) * fda->num_fds;
2381 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2382 pr_err("transaction release %d invalid number of fds (%lld)\n",
2383 debug_id, (u64)fda->num_fds);
2386 if (fd_buf_size > parent->length ||
2387 fda->parent_offset > parent->length - fd_buf_size) {
2388 /* No space for all file descriptors here. */
2389 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2390 debug_id, (u64)fda->num_fds);
2394 * The source data for binder_buffer_object is visible
2395 * to user-space and the @buffer element is the user
2396 * pointer to the buffer_object containing the fd_array.
2397 * Convert the address to an offset relative to
2398 * the base of the transaction buffer.
2401 (parent->buffer - (uintptr_t)buffer->user_data) +
2403 for (fd_index = 0; fd_index < fda->num_fds;
2407 binder_size_t offset = fda_offset +
2408 fd_index * sizeof(fd);
2410 err = binder_alloc_copy_from_buffer(
2411 &proc->alloc, &fd, buffer,
2412 offset, sizeof(fd));
2415 binder_deferred_fd_close(fd);
2417 * Need to make sure the thread goes
2418 * back to userspace to complete the deferred close.
2422 thread->looper_need_return = true;
2427 pr_err("transaction release %d bad object type %x\n",
2428 debug_id, hdr->type);
2434 static int binder_translate_binder(struct flat_binder_object *fp,
2435 struct binder_transaction *t,
2436 struct binder_thread *thread)
2438 struct binder_node *node;
2439 struct binder_proc *proc = thread->proc;
2440 struct binder_proc *target_proc = t->to_proc;
2441 struct binder_ref_data rdata;
2444 node = binder_get_node(proc, fp->binder);
2446 node = binder_new_node(proc, fp);
2450 if (fp->cookie != node->cookie) {
2451 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2452 proc->pid, thread->pid, (u64)fp->binder,
2453 node->debug_id, (u64)fp->cookie,
2458 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2463 ret = binder_inc_ref_for_node(target_proc, node,
2464 fp->hdr.type == BINDER_TYPE_BINDER,
2465 &thread->todo, &rdata);
2469 if (fp->hdr.type == BINDER_TYPE_BINDER)
2470 fp->hdr.type = BINDER_TYPE_HANDLE;
2472 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2474 fp->handle = rdata.desc;
2477 trace_binder_transaction_node_to_ref(t, node, &rdata);
2478 binder_debug(BINDER_DEBUG_TRANSACTION,
2479 " node %d u%016llx -> ref %d desc %d\n",
2480 node->debug_id, (u64)node->ptr,
2481 rdata.debug_id, rdata.desc);
2483 binder_put_node(node);
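/*
 * Editorial sketch of the in-place rewrite above for a strong binder
 * (weak binders become BINDER_TYPE_WEAK_HANDLE instead):
 *
 *	sender's view:   .hdr.type = BINDER_TYPE_BINDER,
 *	                 .binder   = node ptr, .cookie = cookie
 *	receiver's view: .hdr.type = BINDER_TYPE_HANDLE,
 *	                 .handle   = rdata.desc
 *
 * where rdata.desc is a descriptor in the target's ref tree that now
 * holds a reference on the node.
 */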
2487 static int binder_translate_handle(struct flat_binder_object *fp,
2488 struct binder_transaction *t,
2489 struct binder_thread *thread)
2491 struct binder_proc *proc = thread->proc;
2492 struct binder_proc *target_proc = t->to_proc;
2493 struct binder_node *node;
2494 struct binder_ref_data src_rdata;
2497 node = binder_get_node_from_ref(proc, fp->handle,
2498 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2500 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2501 proc->pid, thread->pid, fp->handle);
2504 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2509 binder_node_lock(node);
2510 if (node->proc == target_proc) {
2511 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2512 fp->hdr.type = BINDER_TYPE_BINDER;
2514 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2515 fp->binder = node->ptr;
2516 fp->cookie = node->cookie;
2518 binder_inner_proc_lock(node->proc);
2520 __acquire(&node->proc->inner_lock);
2521 binder_inc_node_nilocked(node,
2522 fp->hdr.type == BINDER_TYPE_BINDER,
2525 binder_inner_proc_unlock(node->proc);
2527 __release(&node->proc->inner_lock);
2528 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2529 binder_debug(BINDER_DEBUG_TRANSACTION,
2530 " ref %d desc %d -> node %d u%016llx\n",
2531 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2533 binder_node_unlock(node);
2535 struct binder_ref_data dest_rdata;
2537 binder_node_unlock(node);
2538 ret = binder_inc_ref_for_node(target_proc, node,
2539 fp->hdr.type == BINDER_TYPE_HANDLE,
2545 fp->handle = dest_rdata.desc;
2547 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2549 binder_debug(BINDER_DEBUG_TRANSACTION,
2550 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2551 src_rdata.debug_id, src_rdata.desc,
2552 dest_rdata.debug_id, dest_rdata.desc,
2556 binder_put_node(node);
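/*
 * Editorial summary of the two cases above: when the referenced node
 * lives in the target process itself, the handle collapses back into
 * a direct BINDER_TYPE_BINDER object with the node's ptr/cookie
 * restored; otherwise a ref is taken (or reused) in the target and
 * the object is rewritten with the target-local descriptor
 * dest_rdata.desc.
 */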
2560 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2561 struct binder_transaction *t,
2562 struct binder_thread *thread,
2563 struct binder_transaction *in_reply_to)
2565 struct binder_proc *proc = thread->proc;
2566 struct binder_proc *target_proc = t->to_proc;
2567 struct binder_txn_fd_fixup *fixup;
2570 bool target_allows_fd;
2573 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2575 target_allows_fd = t->buffer->target_node->accept_fds;
2576 if (!target_allows_fd) {
2577 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2578 proc->pid, thread->pid,
2579 in_reply_to ? "reply" : "transaction",
2582 goto err_fd_not_accepted;
2587 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2588 proc->pid, thread->pid, fd);
2592 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2599 * Add fixup record for this transaction. The allocation
2600 * of the fd in the target needs to be done from a
2603 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2609 fixup->offset = fd_offset;
2610 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2611 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2619 err_fd_not_accepted:
2623 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2624 struct binder_buffer_object *parent,
2625 struct binder_transaction *t,
2626 struct binder_thread *thread,
2627 struct binder_transaction *in_reply_to)
2629 binder_size_t fdi, fd_buf_size;
2630 binder_size_t fda_offset;
2631 struct binder_proc *proc = thread->proc;
2632 struct binder_proc *target_proc = t->to_proc;
2634 fd_buf_size = sizeof(u32) * fda->num_fds;
2635 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2636 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2637 proc->pid, thread->pid, (u64)fda->num_fds);
2640 if (fd_buf_size > parent->length ||
2641 fda->parent_offset > parent->length - fd_buf_size) {
2642 /* No space for all file descriptors here. */
2643 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2644 proc->pid, thread->pid, (u64)fda->num_fds);
2648 * The source data for binder_buffer_object is visible
2649 * to user-space and the @buffer element is the user
2650 * pointer to the buffer_object containing the fd_array.
2651 * Convert the address to an offset relative to
2652 * the base of the transaction buffer.
2654 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2656 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
2657 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2658 proc->pid, thread->pid);
2661 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2664 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2666 ret = binder_alloc_copy_from_buffer(&target_proc->alloc,
2668 offset, sizeof(fd));
2670 ret = binder_translate_fd(fd, offset, t, thread,
2673 return ret > 0 ? -EINVAL : ret;
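/*
 * Editorial sketch of the rebasing math above (fd_at() is notation
 * only, not a real helper): the user-space address in parent->buffer
 * is converted into an offset within the kernel copy of the
 * transaction buffer before each fd is read:
 *
 *	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data)
 *			+ fda->parent_offset;
 *	fd_at(i)   = fda_offset + i * sizeof(u32);
 */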
2678 static int binder_fixup_parent(struct binder_transaction *t,
2679 struct binder_thread *thread,
2680 struct binder_buffer_object *bp,
2681 binder_size_t off_start_offset,
2682 binder_size_t num_valid,
2683 binder_size_t last_fixup_obj_off,
2684 binder_size_t last_fixup_min_off)
2686 struct binder_buffer_object *parent;
2687 struct binder_buffer *b = t->buffer;
2688 struct binder_proc *proc = thread->proc;
2689 struct binder_proc *target_proc = t->to_proc;
2690 struct binder_object object;
2691 binder_size_t buffer_offset;
2692 binder_size_t parent_offset;
2694 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2697 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2698 off_start_offset, &parent_offset,
2701 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2702 proc->pid, thread->pid);
2706 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2707 parent_offset, bp->parent_offset,
2709 last_fixup_min_off)) {
2710 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2711 proc->pid, thread->pid);
2715 if (parent->length < sizeof(binder_uintptr_t) ||
2716 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2717 /* No space for a pointer here! */
2718 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2719 proc->pid, thread->pid);
2722 buffer_offset = bp->parent_offset +
2723 (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2724 if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
2725 &bp->buffer, sizeof(bp->buffer))) {
2726 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2727 proc->pid, thread->pid);
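/*
 * Editorial illustration: if the parent's payload starts at user
 * address U in the target and bp->parent_offset is 16, the pointer
 * slot patched above lives at buffer offset
 * (U - (uintptr_t)b->user_data) + 16 and receives bp->buffer, the
 * target-side address of this child buffer.
 */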
2735 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2736 * @t: transaction to send
2737 * @proc: process to send the transaction to
2738 * @thread: thread in @proc to send the transaction to (may be NULL)
2740 * This function queues a transaction to the specified process. It will try
2741 * to find a thread in the target process to handle the transaction and
2742 wake it up. If no thread is found, the work is queued to the proc todo list.
2745 * If the @thread parameter is not NULL, the transaction is always queued
2746 * to the waitlist of that specific thread.
2748 * Return: true if the transaction was successfully queued
2749 * false if the target process or thread is dead
2751 static bool binder_proc_transaction(struct binder_transaction *t,
2752 struct binder_proc *proc,
2753 struct binder_thread *thread)
2755 struct binder_node *node = t->buffer->target_node;
2756 bool oneway = !!(t->flags & TF_ONE_WAY);
2757 bool pending_async = false;
2760 binder_node_lock(node);
2763 if (node->has_async_transaction)
2764 pending_async = true;
2766 node->has_async_transaction = true;
2769 binder_inner_proc_lock(proc);
2771 if (proc->is_dead || (thread && thread->is_dead)) {
2772 binder_inner_proc_unlock(proc);
2773 binder_node_unlock(node);
2777 if (!thread && !pending_async)
2778 thread = binder_select_thread_ilocked(proc);
2781 binder_enqueue_thread_work_ilocked(thread, &t->work);
2782 else if (!pending_async)
2783 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2785 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2788 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2790 binder_inner_proc_unlock(proc);
2791 binder_node_unlock(node);
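/*
 * Editorial summary of the queueing decision above:
 *
 *	@thread given               -> that thread's todo list
 *	no async pending on node    -> a waiting thread's todo list,
 *	                               else proc->todo
 *	async already pending       -> node->async_todo, i.e. one
 *	                               one-way transaction in flight
 *	                               per node at a time
 *
 * A wakeup is issued only in the first two cases, and it is a
 * synchronous wakeup only for non-oneway transactions.
 */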
2797 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2798 * @node: struct binder_node for which to get refs
2799 * @procp: returns @node->proc if valid
2800 * @error: set to BR_DEAD_REPLY if @node->proc is NULL
2802 * User-space normally keeps the node alive when creating a transaction
2803 * since it has a reference to the target. The local strong ref keeps it
2804 * alive if the sending process dies before the target process processes
2805 * the transaction. If the source process is malicious or has a reference
2806 * counting bug, relying on the local strong ref can fail.
2808 * Since user-space can cause the local strong ref to go away, we also take
2809 * a tmpref on the node to ensure it survives while we are constructing
2810 * the transaction. We also need a tmpref on the proc while we are
2811 * constructing the transaction, so we take that here as well.
2813 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2814 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2815 * target proc has died, @error is set to BR_DEAD_REPLY.
2817 static struct binder_node *binder_get_node_refs_for_txn(
2818 struct binder_node *node,
2819 struct binder_proc **procp,
2822 struct binder_node *target_node = NULL;
2824 binder_node_inner_lock(node);
2827 binder_inc_node_nilocked(node, 1, 0, NULL);
2828 binder_inc_node_tmpref_ilocked(node);
2829 node->proc->tmp_ref++;
2830 *procp = node->proc;
2832 *error = BR_DEAD_REPLY;
2833 binder_node_inner_unlock(node);
2838 static void binder_transaction(struct binder_proc *proc,
2839 struct binder_thread *thread,
2840 struct binder_transaction_data *tr, int reply,
2841 binder_size_t extra_buffers_size)
2844 struct binder_transaction *t;
2845 struct binder_work *w;
2846 struct binder_work *tcomplete;
2847 binder_size_t buffer_offset = 0;
2848 binder_size_t off_start_offset, off_end_offset;
2849 binder_size_t off_min;
2850 binder_size_t sg_buf_offset, sg_buf_end_offset;
2851 struct binder_proc *target_proc = NULL;
2852 struct binder_thread *target_thread = NULL;
2853 struct binder_node *target_node = NULL;
2854 struct binder_transaction *in_reply_to = NULL;
2855 struct binder_transaction_log_entry *e;
2856 uint32_t return_error = 0;
2857 uint32_t return_error_param = 0;
2858 uint32_t return_error_line = 0;
2859 binder_size_t last_fixup_obj_off = 0;
2860 binder_size_t last_fixup_min_off = 0;
2861 struct binder_context *context = proc->context;
2862 int t_debug_id = atomic_inc_return(&binder_last_id);
2863 char *secctx = NULL;
2866 e = binder_transaction_log_add(&binder_transaction_log);
2867 e->debug_id = t_debug_id;
2868 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2869 e->from_proc = proc->pid;
2870 e->from_thread = thread->pid;
2871 e->target_handle = tr->target.handle;
2872 e->data_size = tr->data_size;
2873 e->offsets_size = tr->offsets_size;
2874 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2877 binder_inner_proc_lock(proc);
2878 in_reply_to = thread->transaction_stack;
2879 if (in_reply_to == NULL) {
2880 binder_inner_proc_unlock(proc);
2881 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2882 proc->pid, thread->pid);
2883 return_error = BR_FAILED_REPLY;
2884 return_error_param = -EPROTO;
2885 return_error_line = __LINE__;
2886 goto err_empty_call_stack;
2888 if (in_reply_to->to_thread != thread) {
2889 spin_lock(&in_reply_to->lock);
2890 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2891 proc->pid, thread->pid, in_reply_to->debug_id,
2892 in_reply_to->to_proc ?
2893 in_reply_to->to_proc->pid : 0,
2894 in_reply_to->to_thread ?
2895 in_reply_to->to_thread->pid : 0);
2896 spin_unlock(&in_reply_to->lock);
2897 binder_inner_proc_unlock(proc);
2898 return_error = BR_FAILED_REPLY;
2899 return_error_param = -EPROTO;
2900 return_error_line = __LINE__;
2902 goto err_bad_call_stack;
2904 thread->transaction_stack = in_reply_to->to_parent;
2905 binder_inner_proc_unlock(proc);
2906 binder_set_nice(in_reply_to->saved_priority);
2907 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2908 if (target_thread == NULL) {
2909 /* annotation for sparse */
2910 __release(&target_thread->proc->inner_lock);
2911 return_error = BR_DEAD_REPLY;
2912 return_error_line = __LINE__;
2913 goto err_dead_binder;
2915 if (target_thread->transaction_stack != in_reply_to) {
2916 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2917 proc->pid, thread->pid,
2918 target_thread->transaction_stack ?
2919 target_thread->transaction_stack->debug_id : 0,
2920 in_reply_to->debug_id);
2921 binder_inner_proc_unlock(target_thread->proc);
2922 return_error = BR_FAILED_REPLY;
2923 return_error_param = -EPROTO;
2924 return_error_line = __LINE__;
2926 target_thread = NULL;
2927 goto err_dead_binder;
2929 target_proc = target_thread->proc;
2930 target_proc->tmp_ref++;
2931 binder_inner_proc_unlock(target_thread->proc);
2933 if (tr->target.handle) {
2934 struct binder_ref *ref;
2937 * There must already be a strong ref
2938 * on this node. If so, do a strong
2939 * increment on the node to ensure it
2940 * stays alive until the transaction is
2943 binder_proc_lock(proc);
2944 ref = binder_get_ref_olocked(proc, tr->target.handle,
2947 target_node = binder_get_node_refs_for_txn(
2948 ref->node, &target_proc,
2951 binder_user_error("%d:%d got transaction to invalid handle\n",
2952 proc->pid, thread->pid);
2953 return_error = BR_FAILED_REPLY;
2955 binder_proc_unlock(proc);
2957 mutex_lock(&context->context_mgr_node_lock);
2958 target_node = context->binder_context_mgr_node;
2960 target_node = binder_get_node_refs_for_txn(
2961 target_node, &target_proc,
2964 return_error = BR_DEAD_REPLY;
2965 mutex_unlock(&context->context_mgr_node_lock);
2966 if (target_node && target_proc->pid == proc->pid) {
2967 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2968 proc->pid, thread->pid);
2969 return_error = BR_FAILED_REPLY;
2970 return_error_param = -EINVAL;
2971 return_error_line = __LINE__;
2972 goto err_invalid_target_handle;
2977 * return_error is set above
2979 return_error_param = -EINVAL;
2980 return_error_line = __LINE__;
2981 goto err_dead_binder;
2983 e->to_node = target_node->debug_id;
2984 if (WARN_ON(proc == target_proc)) {
2985 return_error = BR_FAILED_REPLY;
2986 return_error_param = -EINVAL;
2987 return_error_line = __LINE__;
2988 goto err_invalid_target_handle;
2990 if (security_binder_transaction(proc->cred,
2991 target_proc->cred) < 0) {
2992 return_error = BR_FAILED_REPLY;
2993 return_error_param = -EPERM;
2994 return_error_line = __LINE__;
2995 goto err_invalid_target_handle;
2997 binder_inner_proc_lock(proc);
2999 w = list_first_entry_or_null(&thread->todo,
3000 struct binder_work, entry);
3001 if (!(tr->flags & TF_ONE_WAY) && w &&
3002 w->type == BINDER_WORK_TRANSACTION) {
3004 * Do not allow new outgoing transaction from a
3005 * thread that has a transaction at the head of
3006 * its todo list. Only need to check the head
3007 * because binder_select_thread_ilocked picks a
3008 * thread from proc->waiting_threads to enqueue
3009 * the transaction, and nothing is queued to the
3010 * todo list while the thread is on waiting_threads.
3012 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3013 proc->pid, thread->pid);
3014 binder_inner_proc_unlock(proc);
3015 return_error = BR_FAILED_REPLY;
3016 return_error_param = -EPROTO;
3017 return_error_line = __LINE__;
3018 goto err_bad_todo_list;
3021 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3022 struct binder_transaction *tmp;
3024 tmp = thread->transaction_stack;
3025 if (tmp->to_thread != thread) {
3026 spin_lock(&tmp->lock);
3027 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3028 proc->pid, thread->pid, tmp->debug_id,
3029 tmp->to_proc ? tmp->to_proc->pid : 0,
3031 tmp->to_thread->pid : 0);
3032 spin_unlock(&tmp->lock);
3033 binder_inner_proc_unlock(proc);
3034 return_error = BR_FAILED_REPLY;
3035 return_error_param = -EPROTO;
3036 return_error_line = __LINE__;
3037 goto err_bad_call_stack;
3040 struct binder_thread *from;
3042 spin_lock(&tmp->lock);
3044 if (from && from->proc == target_proc) {
3045 atomic_inc(&from->tmp_ref);
3046 target_thread = from;
3047 spin_unlock(&tmp->lock);
3050 spin_unlock(&tmp->lock);
3051 tmp = tmp->from_parent;
3054 binder_inner_proc_unlock(proc);
3057 e->to_thread = target_thread->pid;
3058 e->to_proc = target_proc->pid;
3060 /* TODO: reuse incoming transaction for reply */
3061 t = kzalloc(sizeof(*t), GFP_KERNEL);
3063 return_error = BR_FAILED_REPLY;
3064 return_error_param = -ENOMEM;
3065 return_error_line = __LINE__;
3066 goto err_alloc_t_failed;
3068 INIT_LIST_HEAD(&t->fd_fixups);
3069 binder_stats_created(BINDER_STAT_TRANSACTION);
3070 spin_lock_init(&t->lock);
3072 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3073 if (tcomplete == NULL) {
3074 return_error = BR_FAILED_REPLY;
3075 return_error_param = -ENOMEM;
3076 return_error_line = __LINE__;
3077 goto err_alloc_tcomplete_failed;
3079 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3081 t->debug_id = t_debug_id;
3084 binder_debug(BINDER_DEBUG_TRANSACTION,
3085 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3086 proc->pid, thread->pid, t->debug_id,
3087 target_proc->pid, target_thread->pid,
3088 (u64)tr->data.ptr.buffer,
3089 (u64)tr->data.ptr.offsets,
3090 (u64)tr->data_size, (u64)tr->offsets_size,
3091 (u64)extra_buffers_size);
3093 binder_debug(BINDER_DEBUG_TRANSACTION,
3094 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3095 proc->pid, thread->pid, t->debug_id,
3096 target_proc->pid, target_node->debug_id,
3097 (u64)tr->data.ptr.buffer,
3098 (u64)tr->data.ptr.offsets,
3099 (u64)tr->data_size, (u64)tr->offsets_size,
3100 (u64)extra_buffers_size);
3102 if (!reply && !(tr->flags & TF_ONE_WAY))
3106 t->sender_euid = task_euid(proc->tsk);
3107 t->to_proc = target_proc;
3108 t->to_thread = target_thread;
3110 t->flags = tr->flags;
3111 t->priority = task_nice(current);
3113 if (target_node && target_node->txn_security_ctx) {
3117 security_cred_getsecid(proc->cred, &secid);
3118 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3120 return_error = BR_FAILED_REPLY;
3121 return_error_param = ret;
3122 return_error_line = __LINE__;
3123 goto err_get_secctx_failed;
3125 added_size = ALIGN(secctx_sz, sizeof(u64));
3126 extra_buffers_size += added_size;
3127 if (extra_buffers_size < added_size) {
3128 /* integer overflow of extra_buffers_size */
3129 return_error = BR_FAILED_REPLY;
3130 return_error_param = -EINVAL;
3131 return_error_line = __LINE__;
3132 goto err_bad_extra_size;
3136 trace_binder_transaction(reply, t, target_node);
3138 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3139 tr->offsets_size, extra_buffers_size,
3140 !reply && (t->flags & TF_ONE_WAY), current->tgid);
3141 if (IS_ERR(t->buffer)) {
3143 * -ESRCH indicates VMA cleared. The target is dying.
3145 return_error_param = PTR_ERR(t->buffer);
3146 return_error = return_error_param == -ESRCH ?
3147 BR_DEAD_REPLY : BR_FAILED_REPLY;
3148 return_error_line = __LINE__;
3150 goto err_binder_alloc_buf_failed;
3154 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3155 ALIGN(tr->offsets_size, sizeof(void *)) +
3156 ALIGN(extra_buffers_size, sizeof(void *)) -
3157 ALIGN(secctx_sz, sizeof(u64));
3159 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3160 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3161 t->buffer, buf_offset,
3164 t->security_ctx = 0;
3167 security_release_secctx(secctx, secctx_sz);
3170 t->buffer->debug_id = t->debug_id;
3171 t->buffer->transaction = t;
3172 t->buffer->target_node = target_node;
3173 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3174 trace_binder_transaction_alloc_buf(t->buffer);
3176 if (binder_alloc_copy_user_to_buffer(
3177 &target_proc->alloc,
3179 (const void __user *)
3180 (uintptr_t)tr->data.ptr.buffer,
3182 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3183 proc->pid, thread->pid);
3184 return_error = BR_FAILED_REPLY;
3185 return_error_param = -EFAULT;
3186 return_error_line = __LINE__;
3187 goto err_copy_data_failed;
3189 if (binder_alloc_copy_user_to_buffer(
3190 &target_proc->alloc,
3192 ALIGN(tr->data_size, sizeof(void *)),
3193 (const void __user *)
3194 (uintptr_t)tr->data.ptr.offsets,
3195 tr->offsets_size)) {
3196 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3197 proc->pid, thread->pid);
3198 return_error = BR_FAILED_REPLY;
3199 return_error_param = -EFAULT;
3200 return_error_line = __LINE__;
3201 goto err_copy_data_failed;
3203 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3204 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3205 proc->pid, thread->pid, (u64)tr->offsets_size);
3206 return_error = BR_FAILED_REPLY;
3207 return_error_param = -EINVAL;
3208 return_error_line = __LINE__;
3209 goto err_bad_offset;
3211 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3212 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3213 proc->pid, thread->pid,
3214 (u64)extra_buffers_size);
3215 return_error = BR_FAILED_REPLY;
3216 return_error_param = -EINVAL;
3217 return_error_line = __LINE__;
3218 goto err_bad_offset;
3220 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3221 buffer_offset = off_start_offset;
3222 off_end_offset = off_start_offset + tr->offsets_size;
3223 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3224 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3225 ALIGN(secctx_sz, sizeof(u64));
3227 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3228 buffer_offset += sizeof(binder_size_t)) {
3229 struct binder_object_header *hdr;
3231 struct binder_object object;
3232 binder_size_t object_offset;
3234 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3238 sizeof(object_offset))) {
3239 return_error = BR_FAILED_REPLY;
3240 return_error_param = -EINVAL;
3241 return_error_line = __LINE__;
3242 goto err_bad_offset;
3244 object_size = binder_get_object(target_proc, t->buffer,
3245 object_offset, &object);
3246 if (object_size == 0 || object_offset < off_min) {
3247 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3248 proc->pid, thread->pid,
3251 (u64)t->buffer->data_size);
3252 return_error = BR_FAILED_REPLY;
3253 return_error_param = -EINVAL;
3254 return_error_line = __LINE__;
3255 goto err_bad_offset;
3259 off_min = object_offset + object_size;
3260 switch (hdr->type) {
3261 case BINDER_TYPE_BINDER:
3262 case BINDER_TYPE_WEAK_BINDER: {
3263 struct flat_binder_object *fp;
3265 fp = to_flat_binder_object(hdr);
3266 ret = binder_translate_binder(fp, t, thread);
3269 binder_alloc_copy_to_buffer(&target_proc->alloc,
3273 return_error = BR_FAILED_REPLY;
3274 return_error_param = ret;
3275 return_error_line = __LINE__;
3276 goto err_translate_failed;
3279 case BINDER_TYPE_HANDLE:
3280 case BINDER_TYPE_WEAK_HANDLE: {
3281 struct flat_binder_object *fp;
3283 fp = to_flat_binder_object(hdr);
3284 ret = binder_translate_handle(fp, t, thread);
3286 binder_alloc_copy_to_buffer(&target_proc->alloc,
3290 return_error = BR_FAILED_REPLY;
3291 return_error_param = ret;
3292 return_error_line = __LINE__;
3293 goto err_translate_failed;
3297 case BINDER_TYPE_FD: {
3298 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3299 binder_size_t fd_offset = object_offset +
3300 (uintptr_t)&fp->fd - (uintptr_t)fp;
3301 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3302 thread, in_reply_to);
3306 binder_alloc_copy_to_buffer(&target_proc->alloc,
3310 return_error = BR_FAILED_REPLY;
3311 return_error_param = ret;
3312 return_error_line = __LINE__;
3313 goto err_translate_failed;
3316 case BINDER_TYPE_FDA: {
3317 struct binder_object ptr_object;
3318 binder_size_t parent_offset;
3319 struct binder_fd_array_object *fda =
3320 to_binder_fd_array_object(hdr);
3321 size_t num_valid = (buffer_offset - off_start_offset) /
3322 sizeof(binder_size_t);
3323 struct binder_buffer_object *parent =
3324 binder_validate_ptr(target_proc, t->buffer,
3325 &ptr_object, fda->parent,
3330 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3331 proc->pid, thread->pid);
3332 return_error = BR_FAILED_REPLY;
3333 return_error_param = -EINVAL;
3334 return_error_line = __LINE__;
3335 goto err_bad_parent;
3337 if (!binder_validate_fixup(target_proc, t->buffer,
3342 last_fixup_min_off)) {
3343 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3344 proc->pid, thread->pid);
3345 return_error = BR_FAILED_REPLY;
3346 return_error_param = -EINVAL;
3347 return_error_line = __LINE__;
3348 goto err_bad_parent;
3350 ret = binder_translate_fd_array(fda, parent, t, thread,
3353 return_error = BR_FAILED_REPLY;
3354 return_error_param = ret;
3355 return_error_line = __LINE__;
3356 goto err_translate_failed;
3358 last_fixup_obj_off = parent_offset;
3359 last_fixup_min_off =
3360 fda->parent_offset + sizeof(u32) * fda->num_fds;
3362 case BINDER_TYPE_PTR: {
3363 struct binder_buffer_object *bp =
3364 to_binder_buffer_object(hdr);
3365 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3368 if (bp->length > buf_left) {
3369 binder_user_error("%d:%d got transaction with too large buffer\n",
3370 proc->pid, thread->pid);
3371 return_error = BR_FAILED_REPLY;
3372 return_error_param = -EINVAL;
3373 return_error_line = __LINE__;
3374 goto err_bad_offset;
3376 if (binder_alloc_copy_user_to_buffer(
3377 &target_proc->alloc,
3380 (const void __user *)
3381 (uintptr_t)bp->buffer,
3383 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3384 proc->pid, thread->pid);
3385 return_error_param = -EFAULT;
3386 return_error = BR_FAILED_REPLY;
3387 return_error_line = __LINE__;
3388 goto err_copy_data_failed;
3390 /* Fixup buffer pointer to target proc address space */
3391 bp->buffer = (uintptr_t)
3392 t->buffer->user_data + sg_buf_offset;
3393 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3395 num_valid = (buffer_offset - off_start_offset) /
3396 sizeof(binder_size_t);
3397 ret = binder_fixup_parent(t, thread, bp,
3401 last_fixup_min_off);
3403 binder_alloc_copy_to_buffer(&target_proc->alloc,
3407 return_error = BR_FAILED_REPLY;
3408 return_error_param = ret;
3409 return_error_line = __LINE__;
3410 goto err_translate_failed;
3412 last_fixup_obj_off = object_offset;
3413 last_fixup_min_off = 0;
3416 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3417 proc->pid, thread->pid, hdr->type);
3418 return_error = BR_FAILED_REPLY;
3419 return_error_param = -EINVAL;
3420 return_error_line = __LINE__;
3421 goto err_bad_object_type;
3424 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3425 t->work.type = BINDER_WORK_TRANSACTION;
3428 binder_enqueue_thread_work(thread, tcomplete);
3429 binder_inner_proc_lock(target_proc);
3430 if (target_thread->is_dead) {
3431 binder_inner_proc_unlock(target_proc);
3432 goto err_dead_proc_or_thread;
3434 BUG_ON(t->buffer->async_transaction != 0);
3435 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3436 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3437 binder_inner_proc_unlock(target_proc);
3438 wake_up_interruptible_sync(&target_thread->wait);
3439 binder_free_transaction(in_reply_to);
3440 } else if (!(t->flags & TF_ONE_WAY)) {
3441 BUG_ON(t->buffer->async_transaction != 0);
3442 binder_inner_proc_lock(proc);
3444 * Defer the TRANSACTION_COMPLETE, so we don't return to
3445 * userspace immediately; this allows the target process to
3446 * immediately start processing this transaction, reducing
3447 * latency. We will then return the TRANSACTION_COMPLETE when
3448 * the target replies (or there is an error).
3450 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3452 t->from_parent = thread->transaction_stack;
3453 thread->transaction_stack = t;
3454 binder_inner_proc_unlock(proc);
3455 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3456 binder_inner_proc_lock(proc);
3457 binder_pop_transaction_ilocked(thread, t);
3458 binder_inner_proc_unlock(proc);
3459 goto err_dead_proc_or_thread;
3462 BUG_ON(target_node == NULL);
3463 BUG_ON(t->buffer->async_transaction != 1);
3464 binder_enqueue_thread_work(thread, tcomplete);
3465 if (!binder_proc_transaction(t, target_proc, NULL))
3466 goto err_dead_proc_or_thread;
3469 binder_thread_dec_tmpref(target_thread);
3470 binder_proc_dec_tmpref(target_proc);
3472 binder_dec_node_tmpref(target_node);
3474 * write barrier to synchronize with initialization
3478 WRITE_ONCE(e->debug_id_done, t_debug_id);
3481 err_dead_proc_or_thread:
3482 return_error = BR_DEAD_REPLY;
3483 return_error_line = __LINE__;
3484 binder_dequeue_work(proc, tcomplete);
3485 err_translate_failed:
3486 err_bad_object_type:
3489 err_copy_data_failed:
3490 binder_free_txn_fixups(t);
3491 trace_binder_transaction_failed_buffer_release(t->buffer);
3492 binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3493 buffer_offset, true);
3495 binder_dec_node_tmpref(target_node);
3497 t->buffer->transaction = NULL;
3498 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3499 err_binder_alloc_buf_failed:
3502 security_release_secctx(secctx, secctx_sz);
3503 err_get_secctx_failed:
3505 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3506 err_alloc_tcomplete_failed:
3508 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3512 err_empty_call_stack:
3514 err_invalid_target_handle:
3516 binder_thread_dec_tmpref(target_thread);
3518 binder_proc_dec_tmpref(target_proc);
3520 binder_dec_node(target_node, 1, 0);
3521 binder_dec_node_tmpref(target_node);
3524 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3525 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3526 proc->pid, thread->pid, return_error, return_error_param,
3527 (u64)tr->data_size, (u64)tr->offsets_size,
3531 struct binder_transaction_log_entry *fe;
3533 e->return_error = return_error;
3534 e->return_error_param = return_error_param;
3535 e->return_error_line = return_error_line;
3536 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3539 * write barrier to synchronize with initialization
3543 WRITE_ONCE(e->debug_id_done, t_debug_id);
3544 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3547 BUG_ON(thread->return_error.cmd != BR_OK);
3549 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3550 binder_enqueue_thread_work(thread, &thread->return_error.work);
3551 binder_send_failed_reply(in_reply_to, return_error);
3553 thread->return_error.cmd = return_error;
3554 binder_enqueue_thread_work(thread, &thread->return_error.work);
3559 * binder_free_buf() - free the specified buffer
3560 * @proc: binder proc that owns the buffer
 * @thread: binder thread performing the buffer release
3561 * @buffer: buffer to be freed
3562 * @is_failure: true if the transaction failed to send
3564 * If the buffer is for an async transaction, enqueue the next async
3565 * transaction from the node.
3567 * Clean up the buffer and free it.
3570 binder_free_buf(struct binder_proc *proc,
3571 struct binder_thread *thread,
3572 struct binder_buffer *buffer, bool is_failure)
3574 binder_inner_proc_lock(proc);
3575 if (buffer->transaction) {
3576 buffer->transaction->buffer = NULL;
3577 buffer->transaction = NULL;
3579 binder_inner_proc_unlock(proc);
3580 if (buffer->async_transaction && buffer->target_node) {
3581 struct binder_node *buf_node;
3582 struct binder_work *w;
3584 buf_node = buffer->target_node;
3585 binder_node_inner_lock(buf_node);
3586 BUG_ON(!buf_node->has_async_transaction);
3587 BUG_ON(buf_node->proc != proc);
3588 w = binder_dequeue_work_head_ilocked(
3589 &buf_node->async_todo);
3591 buf_node->has_async_transaction = false;
3593 binder_enqueue_work_ilocked(
3595 binder_wakeup_proc_ilocked(proc);
3597 binder_node_inner_unlock(buf_node);
3599 trace_binder_transaction_buffer_release(buffer);
3600 binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
3601 binder_alloc_free_buf(&proc->alloc, buffer);
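/*
 * Editorial note: this is also where per-node async throttling is
 * released; freeing an async transaction's buffer either pulls the
 * next item off node->async_todo onto proc->todo (waking the proc)
 * or, if none is queued, clears has_async_transaction on the node.
 */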
3604 static int binder_thread_write(struct binder_proc *proc,
3605 struct binder_thread *thread,
3606 binder_uintptr_t binder_buffer, size_t size,
3607 binder_size_t *consumed)
3610 struct binder_context *context = proc->context;
3611 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3612 void __user *ptr = buffer + *consumed;
3613 void __user *end = buffer + size;
3615 while (ptr < end && thread->return_error.cmd == BR_OK) {
3618 if (get_user(cmd, (uint32_t __user *)ptr))
3620 ptr += sizeof(uint32_t);
3621 trace_binder_command(cmd);
3622 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3623 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3624 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3625 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3633 const char *debug_string;
3634 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3635 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3636 struct binder_ref_data rdata;
3638 if (get_user(target, (uint32_t __user *)ptr))
3641 ptr += sizeof(uint32_t);
3643 if (increment && !target) {
3644 struct binder_node *ctx_mgr_node;
3645 mutex_lock(&context->context_mgr_node_lock);
3646 ctx_mgr_node = context->binder_context_mgr_node;
3648 if (ctx_mgr_node->proc == proc) {
3649 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3650 proc->pid, thread->pid);
3651 mutex_unlock(&context->context_mgr_node_lock);
3654 ret = binder_inc_ref_for_node(
3656 strong, NULL, &rdata);
3658 mutex_unlock(&context->context_mgr_node_lock);
3661 ret = binder_update_ref_for_handle(
3662 proc, target, increment, strong,
3664 if (!ret && rdata.desc != target) {
3665 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3666 proc->pid, thread->pid,
3667 target, rdata.desc);
3671 debug_string = "IncRefs";
3674 debug_string = "Acquire";
3677 debug_string = "Release";
3681 debug_string = "DecRefs";
3685 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3686 proc->pid, thread->pid, debug_string,
3687 strong, target, ret);
3690 binder_debug(BINDER_DEBUG_USER_REFS,
3691 "%d:%d %s ref %d desc %d s %d w %d\n",
3692 proc->pid, thread->pid, debug_string,
3693 rdata.debug_id, rdata.desc, rdata.strong,
3697 case BC_INCREFS_DONE:
3698 case BC_ACQUIRE_DONE: {
3699 binder_uintptr_t node_ptr;
3700 binder_uintptr_t cookie;
3701 struct binder_node *node;
3704 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3706 ptr += sizeof(binder_uintptr_t);
3707 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3709 ptr += sizeof(binder_uintptr_t);
3710 node = binder_get_node(proc, node_ptr);
3712 binder_user_error("%d:%d %s u%016llx no match\n",
3713 proc->pid, thread->pid,
3714 cmd == BC_INCREFS_DONE ?
3720 if (cookie != node->cookie) {
3721 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3722 proc->pid, thread->pid,
3723 cmd == BC_INCREFS_DONE ?
3724 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3725 (u64)node_ptr, node->debug_id,
3726 (u64)cookie, (u64)node->cookie);
3727 binder_put_node(node);
3730 binder_node_inner_lock(node);
3731 if (cmd == BC_ACQUIRE_DONE) {
3732 if (node->pending_strong_ref == 0) {
3733 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3734 proc->pid, thread->pid,
3736 binder_node_inner_unlock(node);
3737 binder_put_node(node);
3740 node->pending_strong_ref = 0;
3742 if (node->pending_weak_ref == 0) {
3743 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3744 proc->pid, thread->pid,
3746 binder_node_inner_unlock(node);
3747 binder_put_node(node);
3750 node->pending_weak_ref = 0;
3752 free_node = binder_dec_node_nilocked(node,
3753 cmd == BC_ACQUIRE_DONE, 0);
3755 binder_debug(BINDER_DEBUG_USER_REFS,
3756 "%d:%d %s node %d ls %d lw %d tr %d\n",
3757 proc->pid, thread->pid,
3758 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3759 node->debug_id, node->local_strong_refs,
3760 node->local_weak_refs, node->tmp_refs);
3761 binder_node_inner_unlock(node);
3762 binder_put_node(node);
3765 case BC_ATTEMPT_ACQUIRE:
3766 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3768 case BC_ACQUIRE_RESULT:
3769 pr_err("BC_ACQUIRE_RESULT not supported\n");
3772 case BC_FREE_BUFFER: {
3773 binder_uintptr_t data_ptr;
3774 struct binder_buffer *buffer;
3776 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3778 ptr += sizeof(binder_uintptr_t);
3780 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3782 if (IS_ERR_OR_NULL(buffer)) {
3783 if (PTR_ERR(buffer) == -EPERM) {
3785 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3786 proc->pid, thread->pid,
3790 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3791 proc->pid, thread->pid,
3796 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3797 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3798 proc->pid, thread->pid, (u64)data_ptr,
3800 buffer->transaction ? "active" : "finished");
3801 binder_free_buf(proc, thread, buffer, false);
3805 case BC_TRANSACTION_SG:
3807 struct binder_transaction_data_sg tr;
3809 if (copy_from_user(&tr, ptr, sizeof(tr)))
3812 binder_transaction(proc, thread, &tr.transaction_data,
3813 cmd == BC_REPLY_SG, tr.buffers_size);
3816 case BC_TRANSACTION:
3818 struct binder_transaction_data tr;
3820 if (copy_from_user(&tr, ptr, sizeof(tr)))
3823 binder_transaction(proc, thread, &tr,
3824 cmd == BC_REPLY, 0);
3828 case BC_REGISTER_LOOPER:
3829 binder_debug(BINDER_DEBUG_THREADS,
3830 "%d:%d BC_REGISTER_LOOPER\n",
3831 proc->pid, thread->pid);
3832 binder_inner_proc_lock(proc);
3833 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3834 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3835 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3836 proc->pid, thread->pid);
3837 } else if (proc->requested_threads == 0) {
3838 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3839 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3840 proc->pid, thread->pid);
3842 proc->requested_threads--;
3843 proc->requested_threads_started++;
3845 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3846 binder_inner_proc_unlock(proc);
3848 case BC_ENTER_LOOPER:
3849 binder_debug(BINDER_DEBUG_THREADS,
3850 "%d:%d BC_ENTER_LOOPER\n",
3851 proc->pid, thread->pid);
3852 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3853 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3854 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3855 proc->pid, thread->pid);
3857 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3859 case BC_EXIT_LOOPER:
3860 binder_debug(BINDER_DEBUG_THREADS,
3861 "%d:%d BC_EXIT_LOOPER\n",
3862 proc->pid, thread->pid);
3863 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3866 case BC_REQUEST_DEATH_NOTIFICATION:
3867 case BC_CLEAR_DEATH_NOTIFICATION: {
3869 binder_uintptr_t cookie;
3870 struct binder_ref *ref;
3871 struct binder_ref_death *death = NULL;
3873 if (get_user(target, (uint32_t __user *)ptr))
3875 ptr += sizeof(uint32_t);
3876 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3878 ptr += sizeof(binder_uintptr_t);
3879 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3881 * Allocate memory for death notification
3882 * before taking lock
3884 death = kzalloc(sizeof(*death), GFP_KERNEL);
3885 if (death == NULL) {
3886 WARN_ON(thread->return_error.cmd !=
3888 thread->return_error.cmd = BR_ERROR;
3889 binder_enqueue_thread_work(
3891 &thread->return_error.work);
3893 BINDER_DEBUG_FAILED_TRANSACTION,
3894 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3895 proc->pid, thread->pid);
3899 binder_proc_lock(proc);
3900 ref = binder_get_ref_olocked(proc, target, false);
3902 binder_user_error("%d:%d %s invalid ref %d\n",
3903 proc->pid, thread->pid,
3904 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3905 "BC_REQUEST_DEATH_NOTIFICATION" :
3906 "BC_CLEAR_DEATH_NOTIFICATION",
3908 binder_proc_unlock(proc);
3913 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3914 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3915 proc->pid, thread->pid,
3916 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3917 "BC_REQUEST_DEATH_NOTIFICATION" :
3918 "BC_CLEAR_DEATH_NOTIFICATION",
3919 (u64)cookie, ref->data.debug_id,
3920 ref->data.desc, ref->data.strong,
3921 ref->data.weak, ref->node->debug_id);
3923 binder_node_lock(ref->node);
3924 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3926 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3927 proc->pid, thread->pid);
3928 binder_node_unlock(ref->node);
3929 binder_proc_unlock(proc);
3933 binder_stats_created(BINDER_STAT_DEATH);
3934 INIT_LIST_HEAD(&death->work.entry);
3935 death->cookie = cookie;
3937 if (ref->node->proc == NULL) {
3938 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3940 binder_inner_proc_lock(proc);
3941 binder_enqueue_work_ilocked(
3942 &ref->death->work, &proc->todo);
3943 binder_wakeup_proc_ilocked(proc);
3944 binder_inner_proc_unlock(proc);
3947 if (ref->death == NULL) {
3948 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3949 proc->pid, thread->pid);
3950 binder_node_unlock(ref->node);
3951 binder_proc_unlock(proc);
3955 if (death->cookie != cookie) {
3956 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3957 proc->pid, thread->pid,
3960 binder_node_unlock(ref->node);
3961 binder_proc_unlock(proc);
3965 binder_inner_proc_lock(proc);
3966 if (list_empty(&death->work.entry)) {
3967 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3968 if (thread->looper &
3969 (BINDER_LOOPER_STATE_REGISTERED |
3970 BINDER_LOOPER_STATE_ENTERED))
3971 binder_enqueue_thread_work_ilocked(
3975 binder_enqueue_work_ilocked(
3978 binder_wakeup_proc_ilocked(
3982 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3983 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3985 binder_inner_proc_unlock(proc);
3987 binder_node_unlock(ref->node);
3988 binder_proc_unlock(proc);
3990 case BC_DEAD_BINDER_DONE: {
3991 struct binder_work *w;
3992 binder_uintptr_t cookie;
3993 struct binder_ref_death *death = NULL;
3995 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3998 ptr += sizeof(cookie);
3999 binder_inner_proc_lock(proc);
4000 list_for_each_entry(w, &proc->delivered_death,
4002 struct binder_ref_death *tmp_death =
4004 struct binder_ref_death,
4007 if (tmp_death->cookie == cookie) {
4012 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4013 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4014 proc->pid, thread->pid, (u64)cookie,
4016 if (death == NULL) {
4017 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4018 proc->pid, thread->pid, (u64)cookie);
4019 binder_inner_proc_unlock(proc);
4022 binder_dequeue_work_ilocked(&death->work);
4023 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4024 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4025 if (thread->looper &
4026 (BINDER_LOOPER_STATE_REGISTERED |
4027 BINDER_LOOPER_STATE_ENTERED))
4028 binder_enqueue_thread_work_ilocked(
4029 thread, &death->work);
4031 binder_enqueue_work_ilocked(
4034 binder_wakeup_proc_ilocked(proc);
4037 binder_inner_proc_unlock(proc);
4041 pr_err("%d:%d unknown command %d\n",
4042 proc->pid, thread->pid, cmd);
4045 *consumed = ptr - buffer;
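/*
 * Editorial sketch of the stream parsed above: the write buffer is a
 * sequence of u32 command words, each followed by a fixed-size
 * payload, for example
 *
 *	BC_INCREFS       u32 handle
 *	BC_FREE_BUFFER   binder_uintptr_t data_ptr
 *	BC_TRANSACTION   struct binder_transaction_data
 *
 * and *consumed records how far the loop got, so user space can
 * resume a partially processed buffer.
 */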
4050 static void binder_stat_br(struct binder_proc *proc,
4051 struct binder_thread *thread, uint32_t cmd)
4053 trace_binder_return(cmd);
4054 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4055 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4056 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4057 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4061 static int binder_put_node_cmd(struct binder_proc *proc,
4062 struct binder_thread *thread,
4064 binder_uintptr_t node_ptr,
4065 binder_uintptr_t node_cookie,
4067 uint32_t cmd, const char *cmd_name)
4069 void __user *ptr = *ptrp;
4071 if (put_user(cmd, (uint32_t __user *)ptr))
4073 ptr += sizeof(uint32_t);
4075 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4077 ptr += sizeof(binder_uintptr_t);
4079 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4081 ptr += sizeof(binder_uintptr_t);
4083 binder_stat_br(proc, thread, cmd);
4084 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4085 proc->pid, thread->pid, cmd_name, node_debug_id,
4086 (u64)node_ptr, (u64)node_cookie);
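/*
 * Editorial sketch of the record written above, consumed by user
 * space in this order (sizes fixed by the uapi, 4 + 8 + 8 bytes):
 *
 *	u32              cmd         // e.g. BR_INCREFS
 *	binder_uintptr_t node_ptr
 *	binder_uintptr_t node_cookie
 */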
4092 static int binder_wait_for_work(struct binder_thread *thread,
4096 struct binder_proc *proc = thread->proc;
4099 freezer_do_not_count();
4100 binder_inner_proc_lock(proc);
4102 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4103 if (binder_has_work_ilocked(thread, do_proc_work))
4106 list_add(&thread->waiting_thread_node,
4107 &proc->waiting_threads);
4108 binder_inner_proc_unlock(proc);
4110 binder_inner_proc_lock(proc);
4111 list_del_init(&thread->waiting_thread_node);
4112 if (signal_pending(current)) {
4117 finish_wait(&thread->wait, &wait);
4118 binder_inner_proc_unlock(proc);
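/*
 * Editorial note: while waiting for process-wide work the thread
 * parks on proc->waiting_threads, which is what allows
 * binder_select_thread_ilocked() to hand it a transaction directly;
 * freezer_do_not_count() keeps a binder thread sleeping here from
 * holding up the freezer.
 */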
4125 * binder_apply_fd_fixups() - finish fd translation
4126 * @proc: binder_proc associated with @t->buffer
4127 * @t: binder transaction with list of fd fixups
4129 * Now that we are in the context of the transaction target
4130 * process, we can allocate and install fds. Process the
4131 * list of fds to translate and fixup the buffer with the new fds.
4134 * If we fail to allocate an fd, then free the resources by
4135 * fput'ing files that have not been processed and ksys_close'ing
4136  * any fds that have already been allocated.
4137  */
4138 static int binder_apply_fd_fixups(struct binder_proc *proc,
4139 				  struct binder_transaction *t)
4140 {
4141 	struct binder_txn_fd_fixup *fixup, *tmp;
4142 	int ret = 0;
4144 	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4145 		int fd = get_unused_fd_flags(O_CLOEXEC);
4147 		if (fd < 0) {
4148 			binder_debug(BINDER_DEBUG_TRANSACTION,
4149 				     "failed fd fixup txn %d fd %d\n",
4150 				     t->debug_id, fd);
4151 			ret = -ENOMEM;
4152 			break;
4153 		}
4154 		binder_debug(BINDER_DEBUG_TRANSACTION,
4155 			     "fd fixup txn %d fd %d\n",
4156 			     t->debug_id, fd);
4157 		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4158 		fd_install(fd, fixup->file);
4159 		fixup->file = NULL;
4160 		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4161 						fixup->offset, &fd,
4162 						sizeof(fd))) {
4163 			ret = -EINVAL;
4164 			break;
4165 		}
4166 	}
4167 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4168 		if (fixup->file) {
4169 			fput(fixup->file);
4170 		} else if (ret) {
4171 			u32 fd;
4172 			int err;
4174 			err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
4175 							    t->buffer,
4176 							    fixup->offset,
4177 							    sizeof(fd));
4178 			WARN_ON(err);
4179 			if (!err)
4180 				binder_deferred_fd_close(fd);
4181 		}
4182 		list_del(&fixup->fixup_entry);
4183 		kfree(fixup);
4184 	}
4186 	return ret;
4187 }
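/*
 * binder_thread_read() - fill a userspace read buffer with work.
 * Emits a leading BR_NOOP for a fresh buffer, sleeps (unless
 * @non_block) until thread or process work is queued, then translates
 * each binder_work item into a BR_* command plus payload. On the way
 * out it may append BR_SPAWN_LOOPER to ask userspace for another
 * looper thread.
 */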
4189 static int binder_thread_read(struct binder_proc *proc,
4190 struct binder_thread *thread,
4191 binder_uintptr_t binder_buffer, size_t size,
4192 			      binder_size_t *consumed, int non_block)
4193 {
4194 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4195 void __user *ptr = buffer + *consumed;
4196 	void __user *end = buffer + size;
4198 	int ret = 0;
4199 	int wait_for_proc_work;
4201 	if (*consumed == 0) {
4202 		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4203 			return -EFAULT;
4204 		ptr += sizeof(uint32_t);
4205 	}
4207 retry:
4208 binder_inner_proc_lock(proc);
4209 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4210 binder_inner_proc_unlock(proc);
4212 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4214 trace_binder_wait_for_work(wait_for_proc_work,
4215 !!thread->transaction_stack,
4216 !binder_worklist_empty(proc, &thread->todo));
4217 if (wait_for_proc_work) {
4218 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4219 BINDER_LOOPER_STATE_ENTERED))) {
4220 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4221 proc->pid, thread->pid, thread->looper);
4222 wait_event_interruptible(binder_user_error_wait,
4223 binder_stop_on_user_error < 2);
4224 		}
4225 		binder_set_nice(proc->default_priority);
4226 	}
4228 	if (non_block) {
4229 		if (!binder_has_work(thread, wait_for_proc_work))
4230 			ret = -EAGAIN;
4231 	} else {
4232 		ret = binder_wait_for_work(thread, wait_for_proc_work);
4233 	}
4235 	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4237 	if (ret)
4238 		return ret;
4240 	while (1) {
4241 		uint32_t cmd;
4242 struct binder_transaction_data_secctx tr;
4243 struct binder_transaction_data *trd = &tr.transaction_data;
4244 struct binder_work *w = NULL;
4245 struct list_head *list = NULL;
4246 struct binder_transaction *t = NULL;
4247 struct binder_thread *t_from;
4248 size_t trsize = sizeof(*trd);
4250 binder_inner_proc_lock(proc);
4251 if (!binder_worklist_empty_ilocked(&thread->todo))
4252 list = &thread->todo;
4253 		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4254 			 wait_for_proc_work)
4255 			list = &proc->todo;
4256 		else {
4257 			binder_inner_proc_unlock(proc);
4259 			/* no data added */
4260 			if (ptr - buffer == 4 && !thread->looper_need_return)
4261 				goto retry;
4262 			break;
4263 		}
4265 		if (end - ptr < sizeof(tr) + 4) {
4266 			binder_inner_proc_unlock(proc);
4267 			break;
4268 		}
4269 w = binder_dequeue_work_head_ilocked(list);
4270 if (binder_worklist_empty_ilocked(&thread->todo))
4271 			thread->process_todo = false;
4273 		switch (w->type) {
4274 case BINDER_WORK_TRANSACTION: {
4275 binder_inner_proc_unlock(proc);
4276 			t = container_of(w, struct binder_transaction, work);
4277 		} break;
4278 case BINDER_WORK_RETURN_ERROR: {
4279 struct binder_error *e = container_of(
4280 w, struct binder_error, work);
4282 WARN_ON(e->cmd == BR_OK);
4283 binder_inner_proc_unlock(proc);
4284 			if (put_user(e->cmd, (uint32_t __user *)ptr))
4285 				return -EFAULT;
4286 			cmd = e->cmd;
4287 			e->cmd = BR_OK;
4288 			ptr += sizeof(uint32_t);
4290 			binder_stat_br(proc, thread, cmd);
4291 		} break;
4292 case BINDER_WORK_TRANSACTION_COMPLETE: {
4293 binder_inner_proc_unlock(proc);
4294 			cmd = BR_TRANSACTION_COMPLETE;
4295 			kfree(w);
4296 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4297 			if (put_user(cmd, (uint32_t __user *)ptr))
4298 				return -EFAULT;
4299 			ptr += sizeof(uint32_t);
4301 			binder_stat_br(proc, thread, cmd);
4301 binder_stat_br(proc, thread, cmd);
4302 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4303 "%d:%d BR_TRANSACTION_COMPLETE\n",
4304 				     proc->pid, thread->pid);
4305 		} break;
4306 case BINDER_WORK_NODE: {
4307 			struct binder_node *node = container_of(w, struct binder_node, work);
4308 			int strong, weak;
4309 binder_uintptr_t node_ptr = node->ptr;
4310 binder_uintptr_t node_cookie = node->cookie;
4311 			int node_debug_id = node->debug_id;
4312 			int has_weak_ref;
4313 			int has_strong_ref;
4314 void __user *orig_ptr = ptr;
4316 BUG_ON(proc != node->proc);
4317 strong = node->internal_strong_refs ||
4318 node->local_strong_refs;
4319 weak = !hlist_empty(&node->refs) ||
4320 node->local_weak_refs ||
4321 node->tmp_refs || strong;
4322 has_strong_ref = node->has_strong_ref;
4323 has_weak_ref = node->has_weak_ref;
4325 if (weak && !has_weak_ref) {
4326 node->has_weak_ref = 1;
4327 node->pending_weak_ref = 1;
4328 				node->local_weak_refs++;
4329 			}
4330 if (strong && !has_strong_ref) {
4331 node->has_strong_ref = 1;
4332 node->pending_strong_ref = 1;
4333 				node->local_strong_refs++;
4334 			}
4335 if (!strong && has_strong_ref)
4336 node->has_strong_ref = 0;
4337 if (!weak && has_weak_ref)
4338 node->has_weak_ref = 0;
4339 if (!weak && !strong) {
4340 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4341 "%d:%d node %d u%016llx c%016llx deleted\n",
4342 					     proc->pid, thread->pid,
4343 					     node_debug_id,
4344 					     (u64)node_ptr,
4345 					     (u64)node_cookie);
4346 rb_erase(&node->rb_node, &proc->nodes);
4347 binder_inner_proc_unlock(proc);
4348 				binder_node_lock(node);
4349 				/*
4350 				 * Acquire the node lock before freeing the
4351 				 * node to serialize with other threads that
4352 				 * may have been holding the node lock while
4353 				 * decrementing this node (avoids race where
4354 				 * this thread frees while the other thread
4355 				 * is unlocking the node after the final
4356 				 * decrement)
4357 				 */
4358 				binder_node_unlock(node);
4359 				binder_free_node(node);
4360 			} else
4361 				binder_inner_proc_unlock(proc);
4363 if (weak && !has_weak_ref)
4364 ret = binder_put_node_cmd(
4365 proc, thread, &ptr, node_ptr,
4366 node_cookie, node_debug_id,
4367 BR_INCREFS, "BR_INCREFS");
4368 if (!ret && strong && !has_strong_ref)
4369 ret = binder_put_node_cmd(
4370 proc, thread, &ptr, node_ptr,
4371 node_cookie, node_debug_id,
4372 BR_ACQUIRE, "BR_ACQUIRE");
4373 if (!ret && !strong && has_strong_ref)
4374 ret = binder_put_node_cmd(
4375 proc, thread, &ptr, node_ptr,
4376 node_cookie, node_debug_id,
4377 BR_RELEASE, "BR_RELEASE");
4378 if (!ret && !weak && has_weak_ref)
4379 ret = binder_put_node_cmd(
4380 proc, thread, &ptr, node_ptr,
4381 node_cookie, node_debug_id,
4382 BR_DECREFS, "BR_DECREFS");
4383 if (orig_ptr == ptr)
4384 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4385 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4386 					     proc->pid, thread->pid,
4387 					     node_debug_id,
4388 					     (u64)node_ptr,
4389 					     (u64)node_cookie);
4390 			if (ret)
4391 				return ret;
4392 		} break;
4393 case BINDER_WORK_DEAD_BINDER:
4394 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4395 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4396 			struct binder_ref_death *death;
4397 			uint32_t cmd;
4398 binder_uintptr_t cookie;
4400 death = container_of(w, struct binder_ref_death, work);
4401 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4402 				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4403 			else
4404 cmd = BR_DEAD_BINDER;
4405 cookie = death->cookie;
4407 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4408 "%d:%d %s %016llx\n",
4409 proc->pid, thread->pid,
4410 				     cmd == BR_DEAD_BINDER ?
4411 				     "BR_DEAD_BINDER" :
4412 				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4413 				     (u64)cookie);
4414 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4415 				binder_inner_proc_unlock(proc);
4416 				kfree(death);
4417 				binder_stats_deleted(BINDER_STAT_DEATH);
4418 			} else {
4419 				binder_enqueue_work_ilocked(
4420 						w, &proc->delivered_death);
4421 				binder_inner_proc_unlock(proc);
4422 			}
4423 			if (put_user(cmd, (uint32_t __user *)ptr))
4424 				return -EFAULT;
4425 			ptr += sizeof(uint32_t);
4426 			if (put_user(cookie,
4427 				     (binder_uintptr_t __user *)ptr))
4428 				return -EFAULT;
4429 			ptr += sizeof(binder_uintptr_t);
4430 binder_stat_br(proc, thread, cmd);
4431 if (cmd == BR_DEAD_BINDER)
4432 				goto done; /* DEAD_BINDER notifications can cause transactions */
4433 		} break;
4434 		default:
4435 binder_inner_proc_unlock(proc);
4436 pr_err("%d:%d: bad work type %d\n",
4437 			       proc->pid, thread->pid, w->type);
4438 			break;
4439 		}
4441 		if (!t)
4442 			continue;
4444 		BUG_ON(t->buffer == NULL);
4445 if (t->buffer->target_node) {
4446 struct binder_node *target_node = t->buffer->target_node;
4448 trd->target.ptr = target_node->ptr;
4449 trd->cookie = target_node->cookie;
4450 t->saved_priority = task_nice(current);
4451 if (t->priority < target_node->min_priority &&
4452 !(t->flags & TF_ONE_WAY))
4453 binder_set_nice(t->priority);
4454 else if (!(t->flags & TF_ONE_WAY) ||
4455 t->saved_priority > target_node->min_priority)
4456 binder_set_nice(target_node->min_priority);
4457 			cmd = BR_TRANSACTION;
4458 		} else {
4459 			trd->target.ptr = 0;
4460 			trd->cookie = 0;
4461 			cmd = BR_REPLY;
4462 		}
4463 trd->code = t->code;
4464 trd->flags = t->flags;
4465 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4467 		t_from = binder_get_txn_from(t);
4468 		if (t_from) {
4469 			struct task_struct *sender = t_from->proc->tsk;
4471 			trd->sender_pid =
4472 				task_tgid_nr_ns(sender,
4473 						task_active_pid_ns(current));
4474 		} else {
4475 			trd->sender_pid = 0;
4476 		}
4478 		ret = binder_apply_fd_fixups(proc, t);
4479 		if (ret) {
4480 struct binder_buffer *buffer = t->buffer;
4481 bool oneway = !!(t->flags & TF_ONE_WAY);
4482 			int tid = t->debug_id;
4484 			if (t_from)
4485 				binder_thread_dec_tmpref(t_from);
4486 buffer->transaction = NULL;
4487 			binder_cleanup_transaction(t, "fd fixups failed",
4488 						   BR_FAILED_REPLY);
4489 binder_free_buf(proc, thread, buffer, true);
4490 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4491 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4492 				     proc->pid, thread->pid,
4493 				     oneway ? "async " :
4494 (cmd == BR_REPLY ? "reply " : ""),
4495 tid, BR_FAILED_REPLY, ret, __LINE__);
4496 if (cmd == BR_REPLY) {
4497 cmd = BR_FAILED_REPLY;
4498 				if (put_user(cmd, (uint32_t __user *)ptr))
4499 					return -EFAULT;
4500 				ptr += sizeof(uint32_t);
4501 				binder_stat_br(proc, thread, cmd);
4502 				break;
4503 			}
4504 			continue;
4505 		}
4506 trd->data_size = t->buffer->data_size;
4507 trd->offsets_size = t->buffer->offsets_size;
4508 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4509 trd->data.ptr.offsets = trd->data.ptr.buffer +
4510 					ALIGN(t->buffer->data_size,
4511 					      sizeof(void *));
4513 tr.secctx = t->security_ctx;
4514 if (t->security_ctx) {
4515 cmd = BR_TRANSACTION_SEC_CTX;
4516 			trsize = sizeof(tr);
4517 		}
4518 		if (put_user(cmd, (uint32_t __user *)ptr)) {
4519 			if (t_from)
4520 				binder_thread_dec_tmpref(t_from);
4522 			binder_cleanup_transaction(t, "put_user failed",
4523 						   BR_FAILED_REPLY);
4525 			return -EFAULT;
4526 		}
4527 ptr += sizeof(uint32_t);
4528 		if (copy_to_user(ptr, &tr, trsize)) {
4529 			if (t_from)
4530 				binder_thread_dec_tmpref(t_from);
4532 			binder_cleanup_transaction(t, "copy_to_user failed",
4533 						   BR_FAILED_REPLY);
4535 			return -EFAULT;
4536 		}
4537 		ptr += trsize;
4539 trace_binder_transaction_received(t);
4540 binder_stat_br(proc, thread, cmd);
4541 binder_debug(BINDER_DEBUG_TRANSACTION,
4542 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4543 proc->pid, thread->pid,
4544 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4545 (cmd == BR_TRANSACTION_SEC_CTX) ?
4546 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4547 t->debug_id, t_from ? t_from->proc->pid : 0,
4548 t_from ? t_from->pid : 0, cmd,
4549 t->buffer->data_size, t->buffer->offsets_size,
4550 (u64)trd->data.ptr.buffer,
4551 			     (u64)trd->data.ptr.offsets);
4553 		if (t_from)
4554 binder_thread_dec_tmpref(t_from);
4555 t->buffer->allow_user_free = 1;
4556 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4557 binder_inner_proc_lock(thread->proc);
4558 t->to_parent = thread->transaction_stack;
4559 t->to_thread = thread;
4560 thread->transaction_stack = t;
4561 			binder_inner_proc_unlock(thread->proc);
4562 		} else {
4563 			binder_free_transaction(t);
4564 		}
4565 		break;
4566 	}
4568 done:
4570 *consumed = ptr - buffer;
4571 binder_inner_proc_lock(proc);
4572 if (proc->requested_threads == 0 &&
4573 list_empty(&thread->proc->waiting_threads) &&
4574 proc->requested_threads_started < proc->max_threads &&
4575 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4576 	     BINDER_LOOPER_STATE_ENTERED))
4577 	    /* the user-space code fails to spawn a new thread if we leave this out */) {
4578 proc->requested_threads++;
4579 binder_inner_proc_unlock(proc);
4580 binder_debug(BINDER_DEBUG_THREADS,
4581 "%d:%d BR_SPAWN_LOOPER\n",
4582 proc->pid, thread->pid);
4583 		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4584 			return -EFAULT;
4585 		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4586 	} else
4587 		binder_inner_proc_unlock(proc);
4588 	return 0;
4589 }
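/*
 * binder_release_work() - drain a work list that can no longer be
 * delivered because its thread or process is going away. Undelivered
 * items are logged and their resources freed.
 */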
4591 static void binder_release_work(struct binder_proc *proc,
4592 				struct list_head *list)
4593 {
4594 	struct binder_work *w;
4595 	enum binder_work_type wtype;
4597 	while (1) {
4598 binder_inner_proc_lock(proc);
4599 w = binder_dequeue_work_head_ilocked(list);
4600 wtype = w ? w->type : 0;
4601 		binder_inner_proc_unlock(proc);
4602 		if (!w)
4603 			return;
4605 		switch (wtype) {
4606 case BINDER_WORK_TRANSACTION: {
4607 struct binder_transaction *t;
4609 t = container_of(w, struct binder_transaction, work);
4611 			binder_cleanup_transaction(t, "process died.",
4612 						   BR_DEAD_REPLY);
4613 		} break;
4614 case BINDER_WORK_RETURN_ERROR: {
4615 struct binder_error *e = container_of(
4616 w, struct binder_error, work);
4618 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4619 "undelivered TRANSACTION_ERROR: %u\n",
4622 case BINDER_WORK_TRANSACTION_COMPLETE: {
4623 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4624 "undelivered TRANSACTION_COMPLETE\n");
4626 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4628 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4629 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4630 struct binder_ref_death *death;
4632 death = container_of(w, struct binder_ref_death, work);
4633 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4634 "undelivered death notification, %016llx\n",
4635 				     (u64)death->cookie);
4636 			kfree(death);
4637 			binder_stats_deleted(BINDER_STAT_DEATH);
4638 		} break;
4639 		case BINDER_WORK_NODE:
4640 			break;
4641 		default:
4642 			pr_err("unexpected work type, %d, not freed\n",
4643 			       wtype);
4644 			break;
4645 		}
4646 	}
4647 }
4650 static struct binder_thread *binder_get_thread_ilocked(
4651 	struct binder_proc *proc, struct binder_thread *new_thread)
4652 {
4653 	struct binder_thread *thread = NULL;
4654 	struct rb_node *parent = NULL;
4655 	struct rb_node **p = &proc->threads.rb_node;
4657 	while (*p) {
4658 		parent = *p;
4659 		thread = rb_entry(parent, struct binder_thread, rb_node);
4661 		if (current->pid < thread->pid)
4662 			p = &(*p)->rb_left;
4663 		else if (current->pid > thread->pid)
4664 			p = &(*p)->rb_right;
4665 		else
4666 			return thread;
4667 	}
4668 	if (!new_thread)
4669 		return NULL;
4670 	thread = new_thread;
4671 binder_stats_created(BINDER_STAT_THREAD);
4672 thread->proc = proc;
4673 thread->pid = current->pid;
4674 atomic_set(&thread->tmp_ref, 0);
4675 init_waitqueue_head(&thread->wait);
4676 INIT_LIST_HEAD(&thread->todo);
4677 rb_link_node(&thread->rb_node, parent, p);
4678 rb_insert_color(&thread->rb_node, &proc->threads);
4679 thread->looper_need_return = true;
4680 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4681 thread->return_error.cmd = BR_OK;
4682 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4683 thread->reply_error.cmd = BR_OK;
4684 	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4685 	return thread;
4686 }
4688 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4689 {
4690 	struct binder_thread *thread;
4691 struct binder_thread *new_thread;
4693 binder_inner_proc_lock(proc);
4694 thread = binder_get_thread_ilocked(proc, NULL);
4695 	binder_inner_proc_unlock(proc);
4696 	if (!thread) {
4697 		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4698 		if (new_thread == NULL)
4699 			return NULL;
4700 		binder_inner_proc_lock(proc);
4701 		thread = binder_get_thread_ilocked(proc, new_thread);
4702 		binder_inner_proc_unlock(proc);
4703 		if (thread != new_thread)
4704 			kfree(new_thread);
4705 	}
4706 	return thread;
4707 }
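/*
 * binder_free_proc() - final teardown of a binder_proc once its last
 * temporary reference is dropped; also releases the containing
 * binder_device if this proc held the last reference to it.
 */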
4709 static void binder_free_proc(struct binder_proc *proc)
4710 {
4711 	struct binder_device *device;
4713 BUG_ON(!list_empty(&proc->todo));
4714 BUG_ON(!list_empty(&proc->delivered_death));
4715 device = container_of(proc->context, struct binder_device, context);
4716 if (refcount_dec_and_test(&device->ref)) {
4717 		kfree(proc->context->name);
4718 		kfree(device);
4719 	}
4720 binder_alloc_deferred_release(&proc->alloc);
4721 put_task_struct(proc->tsk);
4722 put_cred(proc->cred);
4723 	binder_stats_deleted(BINDER_STAT_PROC);
4724 	kfree(proc);
4725 }
4727 static void binder_free_thread(struct binder_thread *thread)
4728 {
4729 BUG_ON(!list_empty(&thread->todo));
4730 binder_stats_deleted(BINDER_STAT_THREAD);
4731 	binder_proc_dec_tmpref(thread->proc);
4732 	kfree(thread);
4733 }
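/*
 * binder_thread_release() - detach @thread from @proc and unwind its
 * transaction stack, failing replies with BR_DEAD_REPLY where the
 * sender can no longer be answered. Returns the number of
 * transactions that were still in flight.
 */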
4735 static int binder_thread_release(struct binder_proc *proc,
4736 				 struct binder_thread *thread)
4737 {
4738 struct binder_transaction *t;
4739 struct binder_transaction *send_reply = NULL;
4740 int active_transactions = 0;
4741 struct binder_transaction *last_t = NULL;
4743 	binder_inner_proc_lock(thread->proc);
4744 	/*
4745 	 * take a ref on the proc so it survives
4746 	 * after we remove this thread from proc->threads.
4747 	 * The corresponding dec is when we actually
4748 	 * free the thread in binder_free_thread()
4749 	 */
4750 	proc->tmp_ref++;
4751 	/*
4752 	 * take a ref on this thread to ensure it
4753 	 * survives while we are releasing it
4754 	 */
4755 	atomic_inc(&thread->tmp_ref);
4756 rb_erase(&thread->rb_node, &proc->threads);
4757 	t = thread->transaction_stack;
4758 	if (t) {
4759 		spin_lock(&t->lock);
4760 		if (t->to_thread == thread)
4761 			send_reply = t;
4762 	} else {
4763 		__acquire(&t->lock);
4764 	}
4765 	thread->is_dead = true;
4767 	while (t) {
4768 		last_t = t;
4769 		active_transactions++;
4770 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4771 "release %d:%d transaction %d %s, still active\n",
4772 			     proc->pid, thread->pid,
4773 			     t->debug_id,
4774 (t->to_thread == thread) ? "in" : "out");
4776 		if (t->to_thread == thread) {
4777 			t->to_proc = NULL;
4778 			t->to_thread = NULL;
4779 			if (t->buffer) {
4780 				t->buffer->transaction = NULL;
4781 				t->buffer = NULL;
4782 			}
4783 			t = t->to_parent;
4784 		} else if (t->from == thread) {
4785 			t->from = NULL;
4786 			t = t->from_parent;
4787 		} else
4788 			BUG();
4789 		spin_unlock(&last_t->lock);
4790 		if (t)
4791 			spin_lock(&t->lock);
4792 		else
4793 			__acquire(&t->lock);
4794 	}
4795 /* annotation for sparse, lock not acquired in last iteration above */
4796 __release(&t->lock);
4798 	/*
4799 	 * If this thread used poll, make sure we remove the waitqueue from any
4800 	 * poll data structures holding it.
4801 	 */
4802 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4803 wake_up_pollfree(&thread->wait);
4805 	binder_inner_proc_unlock(thread->proc);
4807 	/*
4808 	 * This is needed to avoid races between wake_up_pollfree() above and
4809 * someone else removing the last entry from the queue for other reasons
4810 * (e.g. ep_remove_wait_queue() being called due to an epoll file
4811 * descriptor being closed). Such other users hold an RCU read lock, so
4812 * we can be sure they're done after we call synchronize_rcu().
4813 	 */
4814 	if (thread->looper & BINDER_LOOPER_STATE_POLL)
4815 		synchronize_rcu();
4817 	if (send_reply)
4818 		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4819 binder_release_work(proc, &thread->todo);
4820 binder_thread_dec_tmpref(thread);
4821 	return active_transactions;
4822 }
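/*
 * binder_poll() - poll/epoll support. Threads that poll are flagged
 * BINDER_LOOPER_STATE_POLL so binder_thread_release() can tear their
 * waitqueue down safely (see the wake_up_pollfree() comment above).
 */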
4824 static __poll_t binder_poll(struct file *filp,
4825 			    struct poll_table_struct *wait)
4826 {
4827 struct binder_proc *proc = filp->private_data;
4828 struct binder_thread *thread = NULL;
4829 bool wait_for_proc_work;
4831 	thread = binder_get_thread(proc);
4832 	if (!thread)
4833 		return EPOLLERR;
4835 binder_inner_proc_lock(thread->proc);
4836 thread->looper |= BINDER_LOOPER_STATE_POLL;
4837 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4839 binder_inner_proc_unlock(thread->proc);
4841 poll_wait(filp, &thread->wait, wait);
4843 	if (binder_has_work(thread, wait_for_proc_work))
4844 		return EPOLLIN;
4846 	return 0;
4847 }
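/*
 * binder_ioctl_write_read() - handle BINDER_WRITE_READ, the main data
 * path. The write buffer is drained through binder_thread_write()
 * first, then the read buffer is filled by binder_thread_read(); the
 * *_consumed fields are updated so userspace can resume a partially
 * processed buffer.
 *
 * Illustrative userspace call sequence (sketch only, not part of the
 * driver; error handling omitted):
 *
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(cmds),
 *		.write_buffer = (binder_uintptr_t)(uintptr_t)cmds,
 *		.read_size = sizeof(readbuf),
 *		.read_buffer = (binder_uintptr_t)(uintptr_t)readbuf,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */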
4849 static int binder_ioctl_write_read(struct file *filp,
4850 unsigned int cmd, unsigned long arg,
4851 				   struct binder_thread *thread)
4852 {
4853 	int ret = 0;
4854 struct binder_proc *proc = filp->private_data;
4855 unsigned int size = _IOC_SIZE(cmd);
4856 void __user *ubuf = (void __user *)arg;
4857 struct binder_write_read bwr;
4859 	if (size != sizeof(struct binder_write_read)) {
4860 		ret = -EINVAL;
4861 		goto out;
4862 	}
4863 	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4864 		ret = -EFAULT;
4865 		goto out;
4866 	}
4867 binder_debug(BINDER_DEBUG_READ_WRITE,
4868 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4869 proc->pid, thread->pid,
4870 (u64)bwr.write_size, (u64)bwr.write_buffer,
4871 (u64)bwr.read_size, (u64)bwr.read_buffer);
4873 if (bwr.write_size > 0) {
4874 		ret = binder_thread_write(proc, thread,
4875 					  bwr.write_buffer,
4876 					  bwr.write_size,
4877 					  &bwr.write_consumed);
4878 		trace_binder_write_done(ret);
4879 		if (ret < 0) {
4880 			bwr.read_consumed = 0;
4881 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4882 				ret = -EFAULT;
4883 			goto out;
4884 		}
4885 	}
4886 if (bwr.read_size > 0) {
4887 		ret = binder_thread_read(proc, thread, bwr.read_buffer,
4888 					 bwr.read_size,
4889 					 &bwr.read_consumed,
4890 					 filp->f_flags & O_NONBLOCK);
4891 trace_binder_read_done(ret);
4892 binder_inner_proc_lock(proc);
4893 if (!binder_worklist_empty_ilocked(&proc->todo))
4894 binder_wakeup_proc_ilocked(proc);
4895 		binder_inner_proc_unlock(proc);
4896 		if (ret < 0) {
4897 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4898 				ret = -EFAULT;
4899 			goto out;
4900 		}
4901 	}
4902 binder_debug(BINDER_DEBUG_READ_WRITE,
4903 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4904 proc->pid, thread->pid,
4905 (u64)bwr.write_consumed, (u64)bwr.write_size,
4906 (u64)bwr.read_consumed, (u64)bwr.read_size);
4907 	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4908 		ret = -EFAULT;
4909 		goto out;
4910 	}
4911 out:
4912 	return ret;
4913 }
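/*
 * binder_ioctl_set_ctx_mgr() - install the context-manager node that
 * handle 0 refers to. Only one process per context may register; the
 * security_binder_set_context_mgr() hook and, when configured, the
 * binder_context_mgr_uid check gate who may do so.
 */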
4915 static int binder_ioctl_set_ctx_mgr(struct file *filp,
4916 					struct flat_binder_object *fbo)
4917 {
4918 	int ret = 0;
4919 struct binder_proc *proc = filp->private_data;
4920 struct binder_context *context = proc->context;
4921 struct binder_node *new_node;
4922 kuid_t curr_euid = current_euid();
4924 mutex_lock(&context->context_mgr_node_lock);
4925 if (context->binder_context_mgr_node) {
4926 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4930 ret = security_binder_set_context_mgr(proc->cred);
4933 if (uid_valid(context->binder_context_mgr_uid)) {
4934 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4935 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4936 from_kuid(&init_user_ns, curr_euid),
4937 from_kuid(&init_user_ns,
4938 			       context->binder_context_mgr_uid));
4939 			ret = -EPERM;
4940 			goto out;
4941 		}
4942 	} else {
4943 		context->binder_context_mgr_uid = curr_euid;
4944 	}
4945 	new_node = binder_new_node(proc, fbo);
4946 	if (!new_node) {
4947 		ret = -ENOMEM;
4948 		goto out;
4949 	}
4950 binder_node_lock(new_node);
4951 new_node->local_weak_refs++;
4952 new_node->local_strong_refs++;
4953 new_node->has_strong_ref = 1;
4954 new_node->has_weak_ref = 1;
4955 context->binder_context_mgr_node = new_node;
4956 binder_node_unlock(new_node);
4957 	binder_put_node(new_node);
4958 out:
4959 	mutex_unlock(&context->context_mgr_node_lock);
4960 	return ret;
4961 }
4963 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
4964 					struct binder_node_info_for_ref *info)
4965 {
4966 struct binder_node *node;
4967 struct binder_context *context = proc->context;
4968 __u32 handle = info->handle;
4970 if (info->strong_count || info->weak_count || info->reserved1 ||
4971 info->reserved2 || info->reserved3) {
4972 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
4977 /* This ioctl may only be used by the context manager */
4978 mutex_lock(&context->context_mgr_node_lock);
4979 if (!context->binder_context_mgr_node ||
4980 context->binder_context_mgr_node->proc != proc) {
4981 		mutex_unlock(&context->context_mgr_node_lock);
4982 		return -EPERM;
4983 	}
4984 	mutex_unlock(&context->context_mgr_node_lock);
4986 	node = binder_get_node_from_ref(proc, handle, true, NULL);
4987 	if (!node)
4988 		return -EINVAL;
4990 info->strong_count = node->local_strong_refs +
4991 node->internal_strong_refs;
4992 info->weak_count = node->local_weak_refs;
4994 	binder_put_node(node);
4996 	return 0;
4997 }
4999 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5000 				struct binder_node_debug_info *info)
5001 {
5002 	struct rb_node *n;
5003 	binder_uintptr_t ptr = info->ptr;
5005 memset(info, 0, sizeof(*info));
5007 binder_inner_proc_lock(proc);
5008 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5009 		struct binder_node *node = rb_entry(n, struct binder_node,
5010 						    rb_node);
5011 if (node->ptr > ptr) {
5012 info->ptr = node->ptr;
5013 info->cookie = node->cookie;
5014 info->has_strong_ref = node->has_strong_ref;
5015 			info->has_weak_ref = node->has_weak_ref;
5016 			break;
5017 		}
5018 	}
5019 	binder_inner_proc_unlock(proc);
5021 	return 0;
5022 }
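/*
 * binder_ioctl() - dispatcher for the binder ioctls. Everything other
 * than BINDER_WRITE_READ is a small control operation; the switch
 * below fans out to the helpers above.
 */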
5024 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5025 {
5026 	int ret;
5027 struct binder_proc *proc = filp->private_data;
5028 struct binder_thread *thread;
5029 unsigned int size = _IOC_SIZE(cmd);
5030 void __user *ubuf = (void __user *)arg;
5032 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
5033 proc->pid, current->pid, cmd, arg);*/
5035 binder_selftest_alloc(&proc->alloc);
5037 trace_binder_ioctl(cmd, arg);
5039 	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5040 	if (ret)
5041 		goto err_unlocked;
5043 thread = binder_get_thread(proc);
5044 	if (thread == NULL) {
5045 		ret = -ENOMEM;
5046 		goto err;
5047 	}
5049 	switch (cmd) {
5050 case BINDER_WRITE_READ:
5051 		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
5052 		if (ret)
5053 			goto err;
5054 		break;
5055 	case BINDER_SET_MAX_THREADS: {
5056 		int max_threads;
5058 		if (copy_from_user(&max_threads, ubuf,
5059 				   sizeof(max_threads))) {
5060 			ret = -EINVAL;
5061 			goto err;
5062 		}
5063 binder_inner_proc_lock(proc);
5064 proc->max_threads = max_threads;
5065 		binder_inner_proc_unlock(proc);
5066 		break;
5067 	}
5068 case BINDER_SET_CONTEXT_MGR_EXT: {
5069 struct flat_binder_object fbo;
5071 		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5072 			ret = -EINVAL;
5073 			goto err;
5074 		}
5075 		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5076 		if (ret)
5077 			goto err;
5078 		break;
5079 	}
5080 	case BINDER_SET_CONTEXT_MGR:
5081 		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5082 		if (ret)
5083 			goto err;
5084 		break;
5085 case BINDER_THREAD_EXIT:
5086 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5087 proc->pid, thread->pid);
5088 		binder_thread_release(proc, thread);
5089 		thread = NULL;
5090 		break;
5091 case BINDER_VERSION: {
5092 struct binder_version __user *ver = ubuf;
5094 		if (size != sizeof(struct binder_version)) {
5095 			ret = -EINVAL;
5096 			goto err;
5097 		}
5098 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5099 			     &ver->protocol_version)) {
5100 			ret = -EINVAL;
5101 			goto err;
5102 		}
5103 		break;
5104 	}
5105 case BINDER_GET_NODE_INFO_FOR_REF: {
5106 struct binder_node_info_for_ref info;
5108 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5109 			ret = -EFAULT;
5110 			goto err;
5111 		}
5113 		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5114 		if (ret)
5115 			goto err;
5117 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5118 			ret = -EFAULT;
5119 			goto err;
5120 		}
5122 		break;
5123 	}
5124 case BINDER_GET_NODE_DEBUG_INFO: {
5125 struct binder_node_debug_info info;
5127 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5128 			ret = -EFAULT;
5129 			goto err;
5130 		}
5132 		ret = binder_ioctl_get_node_debug_info(proc, &info);
5133 		if (ret)
5134 			goto err;
5136 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5137 			ret = -EFAULT;
5138 			goto err;
5139 		}
5140 		break;
5141 	}
5142 	default:
5143 		ret = -EINVAL;
5144 		goto err;
5145 	}
5146 	ret = 0;
5147 err:
5148 	if (thread)
5149 thread->looper_need_return = false;
5150 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5151 if (ret && ret != -ERESTARTSYS)
5152 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5154 trace_binder_ioctl_done(ret);
5158 static void binder_vma_open(struct vm_area_struct *vma)
5159 {
5160 	struct binder_proc *proc = vma->vm_private_data;
5162 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5163 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5164 proc->pid, vma->vm_start, vma->vm_end,
5165 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5166 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5167 }
5169 static void binder_vma_close(struct vm_area_struct *vma)
5170 {
5171 struct binder_proc *proc = vma->vm_private_data;
5173 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5174 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5175 proc->pid, vma->vm_start, vma->vm_end,
5176 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5177 (unsigned long)pgprot_val(vma->vm_page_prot));
5178 	binder_alloc_vma_close(&proc->alloc);
5179 }
5181 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5182 {
5183 	return VM_FAULT_SIGBUS;
5184 }
5186 static const struct vm_operations_struct binder_vm_ops = {
5187 .open = binder_vma_open,
5188 .close = binder_vma_close,
5189 	.fault = binder_vm_fault,
5190 };
5192 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5193 {
5194 	struct binder_proc *proc = filp->private_data;
5196 	if (proc->tsk != current->group_leader)
5197 		return -EINVAL;
5199 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5200 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5201 __func__, proc->pid, vma->vm_start, vma->vm_end,
5202 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5203 (unsigned long)pgprot_val(vma->vm_page_prot));
5205 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5206 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5207 		       proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5208 		return -EPERM;
5209 	}
5210 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5211 vma->vm_flags &= ~VM_MAYWRITE;
5213 vma->vm_ops = &binder_vm_ops;
5214 vma->vm_private_data = proc;
5216 	return binder_alloc_mmap_handler(&proc->alloc, vma);
5217 }
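/*
 * binder_open() - set up a binder_proc when a process opens a binder
 * device node (misc device or binderfs inode), add it to binder_procs
 * and create its debugfs/binderfs log entries.
 */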
5219 static int binder_open(struct inode *nodp, struct file *filp)
5220 {
5221 struct binder_proc *proc, *itr;
5222 struct binder_device *binder_dev;
5223 struct binderfs_info *info;
5224 struct dentry *binder_binderfs_dir_entry_proc = NULL;
5225 bool existing_pid = false;
5227 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5228 current->group_leader->pid, current->pid);
5230 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5231 	if (proc == NULL)
5232 		return -ENOMEM;
5233 spin_lock_init(&proc->inner_lock);
5234 spin_lock_init(&proc->outer_lock);
5235 get_task_struct(current->group_leader);
5236 proc->tsk = current->group_leader;
5237 proc->cred = get_cred(filp->f_cred);
5238 INIT_LIST_HEAD(&proc->todo);
5239 proc->default_priority = task_nice(current);
5240 /* binderfs stashes devices in i_private */
5241 if (is_binderfs_device(nodp)) {
5242 binder_dev = nodp->i_private;
5243 info = nodp->i_sb->s_fs_info;
5244 		binder_binderfs_dir_entry_proc = info->proc_log_dir;
5245 	} else {
5246 		binder_dev = container_of(filp->private_data,
5247 					  struct binder_device, miscdev);
5248 	}
5249 refcount_inc(&binder_dev->ref);
5250 proc->context = &binder_dev->context;
5251 binder_alloc_init(&proc->alloc);
5253 binder_stats_created(BINDER_STAT_PROC);
5254 proc->pid = current->group_leader->pid;
5255 INIT_LIST_HEAD(&proc->delivered_death);
5256 INIT_LIST_HEAD(&proc->waiting_threads);
5257 filp->private_data = proc;
5259 mutex_lock(&binder_procs_lock);
5260 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5261 if (itr->pid == proc->pid) {
5262 			existing_pid = true;
5263 			break;
5264 		}
5265 	}
5266 hlist_add_head(&proc->proc_node, &binder_procs);
5267 mutex_unlock(&binder_procs_lock);
5269 	if (binder_debugfs_dir_entry_proc && !existing_pid) {
5270 		char strbuf[11];
5272 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5273 		/*
5274 * proc debug entries are shared between contexts.
5275 * Only create for the first PID to avoid debugfs log spamming
5276 * The printing code will anyway print all contexts for a given
5277 		 * PID so this is not a problem.
5278 		 */
5279 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5280 binder_debugfs_dir_entry_proc,
5281 							  (void *)(unsigned long)proc->pid,
5282 							  &proc_fops);
5283 	}
5285 	if (binder_binderfs_dir_entry_proc && !existing_pid) {
5286 		char strbuf[11];
5287 		struct dentry *binderfs_entry;
5289 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5290 		/*
5291 * Similar to debugfs, the process specific log file is shared
5292 * between contexts. Only create for the first PID.
5293 * This is ok since same as debugfs, the log file will contain
5294 		 * information on all contexts of a given PID.
5295 		 */
5296 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5297 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5298 if (!IS_ERR(binderfs_entry)) {
5299 			proc->binderfs_entry = binderfs_entry;
5300 		} else {
5301 			int error;
5303 			error = PTR_ERR(binderfs_entry);
5304 			pr_warn("Unable to create file %s in binderfs (error %d)\n",
5305 				strbuf, error);
5306 		}
5307 	}
5309 	return 0;
5310 }
5312 static int binder_flush(struct file *filp, fl_owner_t id)
5313 {
5314 	struct binder_proc *proc = filp->private_data;
5316 	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5318 	return 0;
5319 }
5321 static void binder_deferred_flush(struct binder_proc *proc)
5322 {
5323 	struct rb_node *n;
5324 	int wake_count = 0;
5326 binder_inner_proc_lock(proc);
5327 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5328 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5330 thread->looper_need_return = true;
5331 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5332 			wake_up_interruptible(&thread->wait);
5333 			wake_count++;
5334 		}
5335 	}
5336 binder_inner_proc_unlock(proc);
5338 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5339 "binder_flush: %d woke %d threads\n", proc->pid,
5343 static int binder_release(struct inode *nodp, struct file *filp)
5345 struct binder_proc *proc = filp->private_data;
5347 debugfs_remove(proc->debugfs_entry);
5349 if (proc->binderfs_entry) {
5350 binderfs_remove_file(proc->binderfs_entry);
5351 		proc->binderfs_entry = NULL;
5352 	}
5354 	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5356 	return 0;
5357 }
5359 static int binder_node_release(struct binder_node *node, int refs)
5360 {
5361 	struct binder_ref *ref;
5362 	int death = 0;
5363 	struct binder_proc *proc = node->proc;
5365 binder_release_work(proc, &node->async_todo);
5367 binder_node_lock(node);
5368 binder_inner_proc_lock(proc);
5369 	binder_dequeue_work_ilocked(&node->work);
5370 	/*
5371 	 * The caller must have taken a temporary ref on the node,
5372 	 */
5373 BUG_ON(!node->tmp_refs);
5374 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5375 binder_inner_proc_unlock(proc);
5376 binder_node_unlock(node);
5377 		binder_free_node(node);
5379 		return refs;
5380 	}
5382 	node->proc = NULL;
5383 node->local_strong_refs = 0;
5384 node->local_weak_refs = 0;
5385 binder_inner_proc_unlock(proc);
5387 spin_lock(&binder_dead_nodes_lock);
5388 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5389 spin_unlock(&binder_dead_nodes_lock);
5391 	hlist_for_each_entry(ref, &node->refs, node_entry) {
5392 		refs++;
5393 		/*
5394 * Need the node lock to synchronize
5395 * with new notification requests and the
5396 * inner lock to synchronize with queued
5397 		 * death notifications.
5398 		 */
5399 		binder_inner_proc_lock(ref->proc);
5400 		if (!ref->death) {
5401 			binder_inner_proc_unlock(ref->proc);
5402 			continue;
5403 		}
5405 		death++;
5407 BUG_ON(!list_empty(&ref->death->work.entry));
5408 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5409 		binder_enqueue_work_ilocked(&ref->death->work,
5410 					    &ref->proc->todo);
5411 binder_wakeup_proc_ilocked(ref->proc);
5412 		binder_inner_proc_unlock(ref->proc);
5413 	}
5415 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5416 "node %d now dead, refs %d, death %d\n",
5417 node->debug_id, refs, death);
5418 binder_node_unlock(node);
5419 	binder_put_node(node);
5421 	return refs;
5422 }
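/*
 * binder_deferred_release() - tear down a dying process: release its
 * threads, nodes and refs, then drop the temporary proc reference so
 * binder_free_proc() can run. Called from the deferred workqueue.
 */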
5424 static void binder_deferred_release(struct binder_proc *proc)
5425 {
5426 	struct binder_context *context = proc->context;
5427 	struct rb_node *n;
5428 	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5430 mutex_lock(&binder_procs_lock);
5431 hlist_del(&proc->proc_node);
5432 mutex_unlock(&binder_procs_lock);
5434 mutex_lock(&context->context_mgr_node_lock);
5435 if (context->binder_context_mgr_node &&
5436 context->binder_context_mgr_node->proc == proc) {
5437 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5438 "%s: %d context_mgr_node gone\n",
5439 __func__, proc->pid);
5440 context->binder_context_mgr_node = NULL;
5442 mutex_unlock(&context->context_mgr_node_lock);
5443 	binder_inner_proc_lock(proc);
5444 	/*
5445 	 * Make sure proc stays alive after we
5446 	 * remove all the threads
5447 	 */
5448 	proc->tmp_ref++;
5450 	proc->is_dead = true;
5451 	threads = 0;
5452 active_transactions = 0;
5453 while ((n = rb_first(&proc->threads))) {
5454 struct binder_thread *thread;
5456 thread = rb_entry(n, struct binder_thread, rb_node);
5457 binder_inner_proc_unlock(proc);
5458 		threads++;
5459 		active_transactions += binder_thread_release(proc, thread);
5460 		binder_inner_proc_lock(proc);
5461 	}
5463 	nodes = 0;
5464 	incoming_refs = 0;
5465 while ((n = rb_first(&proc->nodes))) {
5466 struct binder_node *node;
5468 		node = rb_entry(n, struct binder_node, rb_node);
5469 		nodes++;
5470 		/*
5471 * take a temporary ref on the node before
5472 * calling binder_node_release() which will either
5473 		 * kfree() the node or call binder_put_node()
5474 		 */
5475 binder_inc_node_tmpref_ilocked(node);
5476 rb_erase(&node->rb_node, &proc->nodes);
5477 binder_inner_proc_unlock(proc);
5478 incoming_refs = binder_node_release(node, incoming_refs);
5479 		binder_inner_proc_lock(proc);
5480 	}
5481 	binder_inner_proc_unlock(proc);
5483 	outgoing_refs = 0;
5484 binder_proc_lock(proc);
5485 while ((n = rb_first(&proc->refs_by_desc))) {
5486 struct binder_ref *ref;
5488 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
5489 		outgoing_refs++;
5490 binder_cleanup_ref_olocked(ref);
5491 binder_proc_unlock(proc);
5492 binder_free_ref(ref);
5493 		binder_proc_lock(proc);
5494 	}
5495 binder_proc_unlock(proc);
5497 binder_release_work(proc, &proc->todo);
5498 binder_release_work(proc, &proc->delivered_death);
5500 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5501 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5502 __func__, proc->pid, threads, nodes, incoming_refs,
5503 outgoing_refs, active_transactions);
5505 	binder_proc_dec_tmpref(proc);
5506 }
5508 static void binder_deferred_func(struct work_struct *work)
5509 {
5510 	struct binder_proc *proc;
5512 	int defer;
5514 	do {
5515 mutex_lock(&binder_deferred_lock);
5516 if (!hlist_empty(&binder_deferred_list)) {
5517 proc = hlist_entry(binder_deferred_list.first,
5518 struct binder_proc, deferred_work_node);
5519 hlist_del_init(&proc->deferred_work_node);
5520 defer = proc->deferred_work;
5521 			proc->deferred_work = 0;
5522 		} else {
5523 			proc = NULL;
5524 			defer = 0;
5525 		}
5526 mutex_unlock(&binder_deferred_lock);
5528 if (defer & BINDER_DEFERRED_FLUSH)
5529 binder_deferred_flush(proc);
5531 if (defer & BINDER_DEFERRED_RELEASE)
5532 			binder_deferred_release(proc); /* frees proc */
5533 	} while (proc);
5534 }
5535 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5537 static void
5538 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5539 {
5540 mutex_lock(&binder_deferred_lock);
5541 proc->deferred_work |= defer;
5542 if (hlist_unhashed(&proc->deferred_work_node)) {
5543 hlist_add_head(&proc->deferred_work_node,
5544 &binder_deferred_list);
5545 schedule_work(&binder_deferred_work);
5547 	mutex_unlock(&binder_deferred_lock);
5548 }
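/*
 * Everything below implements the debugfs/binderfs "state", "stats"
 * and "transactions" files: nested printers that walk procs, threads,
 * nodes and refs under the locks named by their _ilocked/_olocked/
 * _nilocked suffixes.
 */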
5550 static void print_binder_transaction_ilocked(struct seq_file *m,
5551 					     struct binder_proc *proc,
5552 					     const char *prefix,
5553 					     struct binder_transaction *t)
5554 {
5555 struct binder_proc *to_proc;
5556 struct binder_buffer *buffer = t->buffer;
5558 spin_lock(&t->lock);
5559 	to_proc = t->to_proc;
5560 	seq_printf(m,
5561 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5562 prefix, t->debug_id, t,
5563 t->from ? t->from->proc->pid : 0,
5564 t->from ? t->from->pid : 0,
5565 to_proc ? to_proc->pid : 0,
5566 t->to_thread ? t->to_thread->pid : 0,
5567 t->code, t->flags, t->priority, t->need_reply);
5568 spin_unlock(&t->lock);
5570 	if (proc != to_proc) {
5571 		/*
5572 * Can only safely deref buffer if we are holding the
5573 		 * correct proc inner lock for this node
5574 		 */
5575 		seq_puts(m, "\n");
5576 		return;
5577 	}
5579 if (buffer == NULL) {
5580 seq_puts(m, " buffer free\n");
5583 if (buffer->target_node)
5584 seq_printf(m, " node %d", buffer->target_node->debug_id);
5585 seq_printf(m, " size %zd:%zd data %pK\n",
5586 		   buffer->data_size, buffer->offsets_size,
5587 		   buffer->user_data);
5588 }
5590 static void print_binder_work_ilocked(struct seq_file *m,
5591 				      struct binder_proc *proc,
5592 				      const char *prefix,
5593 				      const char *transaction_prefix,
5594 				      struct binder_work *w)
5595 {
5596 struct binder_node *node;
5597 	struct binder_transaction *t;
5599 	switch (w->type) {
5600 case BINDER_WORK_TRANSACTION:
5601 t = container_of(w, struct binder_transaction, work);
5602 print_binder_transaction_ilocked(
5603 			m, proc, transaction_prefix, t);
5604 		break;
5605 case BINDER_WORK_RETURN_ERROR: {
5606 struct binder_error *e = container_of(
5607 w, struct binder_error, work);
5609 seq_printf(m, "%stransaction error: %u\n",
5612 case BINDER_WORK_TRANSACTION_COMPLETE:
5613 seq_printf(m, "%stransaction complete\n", prefix);
5615 case BINDER_WORK_NODE:
5616 node = container_of(w, struct binder_node, work);
5617 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5618 prefix, node->debug_id,
5619 			   (u64)node->ptr, (u64)node->cookie);
5620 		break;
5621 case BINDER_WORK_DEAD_BINDER:
5622 seq_printf(m, "%shas dead binder\n", prefix);
5624 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5625 seq_printf(m, "%shas cleared dead binder\n", prefix);
5627 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5628 seq_printf(m, "%shas cleared death notification\n", prefix);
5631 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5636 static void print_binder_thread_ilocked(struct seq_file *m,
5637 struct binder_thread *thread,
5640 struct binder_transaction *t;
5641 struct binder_work *w;
5642 	size_t start_pos = m->count;
5643 	size_t header_pos;
5645 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
5646 thread->pid, thread->looper,
5647 thread->looper_need_return,
5648 atomic_read(&thread->tmp_ref));
5649 header_pos = m->count;
5650 	t = thread->transaction_stack;
5651 	while (t) {
5652 if (t->from == thread) {
5653 print_binder_transaction_ilocked(m, thread->proc,
5654 " outgoing transaction", t);
5656 } else if (t->to_thread == thread) {
5657 print_binder_transaction_ilocked(m, thread->proc,
5658 " incoming transaction", t);
5661 print_binder_transaction_ilocked(m, thread->proc,
5662 " bad transaction", t);
5666 list_for_each_entry(w, &thread->todo, entry) {
5667 print_binder_work_ilocked(m, thread->proc, " ",
5668 " pending transaction", w);
5670 if (!print_always && m->count == header_pos)
5671 		m->count = start_pos;
5672 }
5674 static void print_binder_node_nilocked(struct seq_file *m,
5675 				       struct binder_node *node)
5676 {
5677 struct binder_ref *ref;
5678 	struct binder_work *w;
5679 	int count;
5681 	count = 0;
5682 	hlist_for_each_entry(ref, &node->refs, node_entry)
5683 		count++;
5685 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5686 node->debug_id, (u64)node->ptr, (u64)node->cookie,
5687 node->has_strong_ref, node->has_weak_ref,
5688 node->local_strong_refs, node->local_weak_refs,
5689 		   node->internal_strong_refs, count, node->tmp_refs);
5690 	if (count) {
5691 seq_puts(m, " proc");
5692 hlist_for_each_entry(ref, &node->refs, node_entry)
5693 seq_printf(m, " %d", ref->proc->pid);
5697 list_for_each_entry(w, &node->async_todo, entry)
5698 print_binder_work_ilocked(m, node->proc, " ",
5699 " pending async transaction", w);
5703 static void print_binder_ref_olocked(struct seq_file *m,
5704 struct binder_ref *ref)
5706 binder_node_lock(ref->node);
5707 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
5708 ref->data.debug_id, ref->data.desc,
5709 ref->node->proc ? "" : "dead ",
5710 ref->node->debug_id, ref->data.strong,
5711 ref->data.weak, ref->death);
5712 	binder_node_unlock(ref->node);
5713 }
5715 static void print_binder_proc(struct seq_file *m,
5716 			      struct binder_proc *proc, int print_all)
5717 {
5718 	struct binder_work *w;
5719 	struct rb_node *n;
5720 	size_t start_pos = m->count;
5721 	size_t header_pos;
5722 	struct binder_node *last_node = NULL;
5724 seq_printf(m, "proc %d\n", proc->pid);
5725 seq_printf(m, "context %s\n", proc->context->name);
5726 header_pos = m->count;
5728 binder_inner_proc_lock(proc);
5729 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5730 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5731 rb_node), print_all);
5733 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5734 		struct binder_node *node = rb_entry(n, struct binder_node,
5735 						    rb_node);
5736 		if (!print_all && !node->has_async_transaction)
5737 			continue;
5739 		/*
5740 * take a temporary reference on the node so it
5741 * survives and isn't removed from the tree
5742 		 * while we print it.
5743 		 */
5744 binder_inc_node_tmpref_ilocked(node);
5745 /* Need to drop inner lock to take node lock */
5746 		binder_inner_proc_unlock(proc);
5747 		if (last_node)
5748 binder_put_node(last_node);
5749 binder_node_inner_lock(node);
5750 print_binder_node_nilocked(m, node);
5751 		binder_node_inner_unlock(node);
5752 		last_node = node;
5753 		binder_inner_proc_lock(proc);
5754 	}
5755 	binder_inner_proc_unlock(proc);
5756 	if (last_node)
5757 		binder_put_node(last_node);
5759 	if (print_all) {
5760 binder_proc_lock(proc);
5761 		for (n = rb_first(&proc->refs_by_desc);
5762 		     n != NULL;
5763 		     n = rb_next(n))
5764 			print_binder_ref_olocked(m, rb_entry(n,
5765 							     struct binder_ref,
5766 							     rb_node_desc));
5767 		binder_proc_unlock(proc);
5768 	}
5769 binder_alloc_print_allocated(m, &proc->alloc);
5770 binder_inner_proc_lock(proc);
5771 list_for_each_entry(w, &proc->todo, entry)
5772 print_binder_work_ilocked(m, proc, " ",
5773 " pending transaction", w);
5774 list_for_each_entry(w, &proc->delivered_death, entry) {
5775 seq_puts(m, " has delivered dead binder\n");
5778 binder_inner_proc_unlock(proc);
5779 if (!print_all && m->count == header_pos)
5780 		m->count = start_pos;
5781 }
5783 static const char * const binder_return_strings[] = {
5784 	"BR_ERROR",
5785 	"BR_OK",
5786 	"BR_TRANSACTION",
5787 	"BR_REPLY",
5788 	"BR_ACQUIRE_RESULT",
5789 	"BR_DEAD_REPLY",
5790 	"BR_TRANSACTION_COMPLETE",
5791 	"BR_INCREFS",
5792 	"BR_ACQUIRE",
5793 	"BR_RELEASE",
5794 	"BR_DECREFS",
5795 	"BR_ATTEMPT_ACQUIRE",
5796 	"BR_NOOP",
5797 	"BR_SPAWN_LOOPER",
5798 	"BR_FINISHED",
5799 	"BR_DEAD_BINDER",
5800 	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
5801 	"BR_FAILED_REPLY"
5802 };
5804 static const char * const binder_command_strings[] = {
5805 	"BC_TRANSACTION",
5806 	"BC_REPLY",
5807 	"BC_ACQUIRE_RESULT",
5808 	"BC_FREE_BUFFER",
5809 	"BC_INCREFS",
5810 	"BC_ACQUIRE",
5811 	"BC_RELEASE",
5812 	"BC_DECREFS",
5813 	"BC_INCREFS_DONE",
5814 	"BC_ACQUIRE_DONE",
5815 	"BC_ATTEMPT_ACQUIRE",
5816 	"BC_REGISTER_LOOPER",
5817 	"BC_ENTER_LOOPER",
5818 	"BC_EXIT_LOOPER",
5819 	"BC_REQUEST_DEATH_NOTIFICATION",
5820 	"BC_CLEAR_DEATH_NOTIFICATION",
5821 	"BC_DEAD_BINDER_DONE",
5822 	"BC_TRANSACTION_SG",
5823 	"BC_REPLY_SG",
5824 };
5826 static const char * const binder_objstat_strings[] = {
5827 	"proc",
5828 	"thread",
5829 	"node",
5830 	"ref",
5831 	"death",
5832 	"transaction",
5833 	"transaction_complete"
5834 };
5836 static void print_binder_stats(struct seq_file *m, const char *prefix,
5837 			       struct binder_stats *stats)
5838 {
5839 	int i;
5841 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5842 ARRAY_SIZE(binder_command_strings));
5843 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5844 		int temp = atomic_read(&stats->bc[i]);
5846 		if (temp)
5847 			seq_printf(m, "%s%s: %d\n", prefix,
5848 				   binder_command_strings[i], temp);
5849 	}
5851 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5852 ARRAY_SIZE(binder_return_strings));
5853 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5854 		int temp = atomic_read(&stats->br[i]);
5856 		if (temp)
5857 			seq_printf(m, "%s%s: %d\n", prefix,
5858 				   binder_return_strings[i], temp);
5859 	}
5861 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5862 ARRAY_SIZE(binder_objstat_strings));
5863 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5864 ARRAY_SIZE(stats->obj_deleted));
5865 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5866 int created = atomic_read(&stats->obj_created[i]);
5867 int deleted = atomic_read(&stats->obj_deleted[i]);
5869 if (created || deleted)
5870 seq_printf(m, "%s%s: active %d total %d\n",
5872 				   binder_objstat_strings[i],
5873 				   created - deleted,
5874 				   created);
5875 	}
5876 }
5878 static void print_binder_proc_stats(struct seq_file *m,
5879 				    struct binder_proc *proc)
5880 {
5881 struct binder_work *w;
5882 	struct binder_thread *thread;
5883 	struct rb_node *n;
5884 int count, strong, weak, ready_threads;
5885 size_t free_async_space =
5886 binder_alloc_get_free_async_space(&proc->alloc);
5888 seq_printf(m, "proc %d\n", proc->pid);
5889 seq_printf(m, "context %s\n", proc->context->name);
5892 binder_inner_proc_lock(proc);
5893 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5894 		count++;
5896 	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5897 		ready_threads++;
5899 seq_printf(m, " threads: %d\n", count);
5900 seq_printf(m, " requested threads: %d+%d/%d\n"
5901 " ready threads %d\n"
5902 " free async space %zd\n", proc->requested_threads,
5903 		   proc->requested_threads_started, proc->max_threads,
5904 		   ready_threads,
5905 		   free_async_space);
5906 	count = 0;
5907 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5908 		count++;
5909 binder_inner_proc_unlock(proc);
5910 seq_printf(m, " nodes: %d\n", count);
5914 binder_proc_lock(proc);
5915 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5916 		struct binder_ref *ref = rb_entry(n, struct binder_ref,
5917 						  rb_node_desc);
5918 		count++;
5919 strong += ref->data.strong;
5920 		weak += ref->data.weak;
5921 	}
5922 binder_proc_unlock(proc);
5923 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
5925 count = binder_alloc_get_allocated_count(&proc->alloc);
5926 seq_printf(m, " buffers: %d\n", count);
5928 	binder_alloc_print_pages(m, &proc->alloc);
5930 	count = 0;
5931 binder_inner_proc_lock(proc);
5932 list_for_each_entry(w, &proc->todo, entry) {
5933 		if (w->type == BINDER_WORK_TRANSACTION)
5934 			count++;
5935 	}
5936 binder_inner_proc_unlock(proc);
5937 seq_printf(m, " pending transactions: %d\n", count);
5939 print_binder_stats(m, " ", &proc->stats);
5943 int binder_state_show(struct seq_file *m, void *unused)
5945 struct binder_proc *proc;
5946 struct binder_node *node;
5947 struct binder_node *last_node = NULL;
5949 seq_puts(m, "binder state:\n");
5951 spin_lock(&binder_dead_nodes_lock);
5952 if (!hlist_empty(&binder_dead_nodes))
5953 seq_puts(m, "dead nodes:\n");
5954 	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5955 		/*
5956 * take a temporary reference on the node so it
5957 * survives and isn't removed from the list
5958 		 * while we print it.
5959 		 */
5960 		node->tmp_refs++;
5961 		spin_unlock(&binder_dead_nodes_lock);
5962 		if (last_node)
5963 binder_put_node(last_node);
5964 binder_node_lock(node);
5965 print_binder_node_nilocked(m, node);
5966 		binder_node_unlock(node);
5967 		last_node = node;
5968 		spin_lock(&binder_dead_nodes_lock);
5969 	}
5970 	spin_unlock(&binder_dead_nodes_lock);
5971 	if (last_node)
5972 		binder_put_node(last_node);
5974 mutex_lock(&binder_procs_lock);
5975 hlist_for_each_entry(proc, &binder_procs, proc_node)
5976 print_binder_proc(m, proc, 1);
5977 	mutex_unlock(&binder_procs_lock);
5979 	return 0;
5980 }
5982 int binder_stats_show(struct seq_file *m, void *unused)
5983 {
5984 	struct binder_proc *proc;
5986 seq_puts(m, "binder stats:\n");
5988 print_binder_stats(m, "", &binder_stats);
5990 mutex_lock(&binder_procs_lock);
5991 hlist_for_each_entry(proc, &binder_procs, proc_node)
5992 print_binder_proc_stats(m, proc);
5993 	mutex_unlock(&binder_procs_lock);
5995 	return 0;
5996 }
5998 int binder_transactions_show(struct seq_file *m, void *unused)
5999 {
6000 	struct binder_proc *proc;
6002 seq_puts(m, "binder transactions:\n");
6003 mutex_lock(&binder_procs_lock);
6004 hlist_for_each_entry(proc, &binder_procs, proc_node)
6005 print_binder_proc(m, proc, 0);
6006 	mutex_unlock(&binder_procs_lock);
6008 	return 0;
6009 }
6011 static int proc_show(struct seq_file *m, void *unused)
6012 {
6013 struct binder_proc *itr;
6014 int pid = (unsigned long)m->private;
6016 mutex_lock(&binder_procs_lock);
6017 hlist_for_each_entry(itr, &binder_procs, proc_node) {
6018 if (itr->pid == pid) {
6019 seq_puts(m, "binder proc state:\n");
6020 			print_binder_proc(m, itr, 1);
6021 		}
6022 	}
6023 	mutex_unlock(&binder_procs_lock);
6025 	return 0;
6026 }
6028 static void print_binder_transaction_log_entry(struct seq_file *m,
6029 					struct binder_transaction_log_entry *e)
6030 {
6031 	int debug_id = READ_ONCE(e->debug_id_done);
6032 	/*
6033 	 * read barrier to guarantee debug_id_done read before
6034 	 * we print the log values
6035 	 */
6036 	smp_rmb();
6037 	seq_printf(m,
6038 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6039 e->debug_id, (e->call_type == 2) ? "reply" :
6040 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6041 e->from_thread, e->to_proc, e->to_thread, e->context_name,
6042 e->to_node, e->target_handle, e->data_size, e->offsets_size,
6043 e->return_error, e->return_error_param,
6044 		   e->return_error_line);
6045 	/*
6046 	 * read-barrier to guarantee read of debug_id_done after
6047 	 * done printing the fields of the entry
6048 	 */
6049 	smp_rmb();
6050 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6051 "\n" : " (incomplete)\n");
6054 int binder_transaction_log_show(struct seq_file *m, void *unused)
6056 struct binder_transaction_log *log = m->private;
6057 unsigned int log_cur = atomic_read(&log->cur);
6062 count = log_cur + 1;
6063 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6064 0 : count % ARRAY_SIZE(log->entry);
6065 if (count > ARRAY_SIZE(log->entry) || log->full)
6066 count = ARRAY_SIZE(log->entry);
6067 for (i = 0; i < count; i++) {
6068 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6070 		print_binder_transaction_log_entry(m, &log->entry[index]);
6071 	}
6072 	return 0;
6073 }
6075 const struct file_operations binder_fops = {
6076 .owner = THIS_MODULE,
6077 .poll = binder_poll,
6078 .unlocked_ioctl = binder_ioctl,
6079 .compat_ioctl = compat_ptr_ioctl,
6080 .mmap = binder_mmap,
6081 .open = binder_open,
6082 .flush = binder_flush,
6083 .release = binder_release,
6084 	.may_pollfree = true,
6085 };
6087 static int __init init_binder_device(const char *name)
6088 {
6089 	int ret;
6090 	struct binder_device *binder_device;
6092 	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6093 	if (!binder_device)
6094 		return -ENOMEM;
6096 binder_device->miscdev.fops = &binder_fops;
6097 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6098 binder_device->miscdev.name = name;
6100 refcount_set(&binder_device->ref, 1);
6101 binder_device->context.binder_context_mgr_uid = INVALID_UID;
6102 binder_device->context.name = name;
6103 mutex_init(&binder_device->context.context_mgr_node_lock);
6105 	ret = misc_register(&binder_device->miscdev);
6106 	if (ret < 0) {
6107 		kfree(binder_device);
6108 		return ret;
6109 	}
6111 	hlist_add_head(&binder_device->hlist, &binder_devices);
6113 	return ret;
6114 }
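/*
 * binder_init() - module init: set up the shrinker and the debugfs
 * tree, register one misc device per name in binder_devices_param
 * (unless binderfs is enabled) and initialize binderfs.
 */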
6116 static int __init binder_init(void)
6117 {
6118 	int ret;
6119 char *device_name, *device_tmp;
6120 struct binder_device *device;
6121 struct hlist_node *tmp;
6122 char *device_names = NULL;
6124 	ret = binder_alloc_shrinker_init();
6125 	if (ret)
6126 		return ret;
6128 atomic_set(&binder_transaction_log.cur, ~0U);
6129 atomic_set(&binder_transaction_log_failed.cur, ~0U);
6131 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6132 if (binder_debugfs_dir_entry_root)
6133 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6134 binder_debugfs_dir_entry_root);
6136 if (binder_debugfs_dir_entry_root) {
6137 debugfs_create_file("state",
6139 binder_debugfs_dir_entry_root,
6141 &binder_state_fops);
6142 debugfs_create_file("stats",
6144 binder_debugfs_dir_entry_root,
6146 &binder_stats_fops);
6147 debugfs_create_file("transactions",
6149 binder_debugfs_dir_entry_root,
6151 &binder_transactions_fops);
6152 debugfs_create_file("transaction_log",
6154 binder_debugfs_dir_entry_root,
6155 &binder_transaction_log,
6156 &binder_transaction_log_fops);
6157 debugfs_create_file("failed_transaction_log",
6159 binder_debugfs_dir_entry_root,
6160 &binder_transaction_log_failed,
6161 &binder_transaction_log_fops);
6164 if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6165 strcmp(binder_devices_param, "") != 0) {
6167 * Copy the module_parameter string, because we don't want to
6168 * tokenize it in-place.
6170 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6171 		if (!device_names) {
6172 			ret = -ENOMEM;
6173 			goto err_alloc_device_names_failed;
6174 		}
6176 device_tmp = device_names;
6177 while ((device_name = strsep(&device_tmp, ","))) {
6178 			ret = init_binder_device(device_name);
6179 			if (ret)
6180 				goto err_init_binder_device_failed;
6181 		}
6182 	}
6184 	ret = init_binderfs();
6185 	if (ret)
6186 		goto err_init_binder_device_failed;
6188 	return ret;
6190 err_init_binder_device_failed:
6191 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6192 misc_deregister(&device->miscdev);
6193 		hlist_del(&device->hlist);
6194 		kfree(device);
6195 	}
6197 kfree(device_names);
6199 err_alloc_device_names_failed:
6200 	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6202 	return ret;
6203 }
6205 device_initcall(binder_init);
6207 #define CREATE_TRACE_POINTS
6208 #include "binder_trace.h"
6210 MODULE_LICENSE("GPL v2");