1 // SPDX-License-Identifier: GPL-2.0-only
2 /* binder.c
4 * Android IPC Subsystem
6 * Copyright (C) 2007-2008 Google, Inc.
7 */
9 /*
10 * Locking overview
12 * There are 3 main spinlocks which must be acquired in the
13 * order shown:
15 * 1) proc->outer_lock : protects binder_ref
16 * binder_proc_lock() and binder_proc_unlock() are
17 * used to acq/rel.
18 * 2) node->lock : protects most fields of binder_node.
19 * binder_node_lock() and binder_node_unlock() are
20 * used to acq/rel
21 * 3) proc->inner_lock : protects the thread and node lists
22 * (proc->threads, proc->waiting_threads, proc->nodes)
23 * and all todo lists associated with the binder_proc
24 * (proc->todo, thread->todo, proc->delivered_death and
25 * node->async_todo), as well as thread->transaction_stack
26 * binder_inner_proc_lock() and binder_inner_proc_unlock()
27 * are used to acq/rel
29 * Any lock under procA must never be nested under any lock at the same
30 * level or below on procB.
32 * Functions that require a lock held on entry indicate which lock
33 * in the suffix of the function name:
35 * foo_olocked() : requires node->outer_lock
36 * foo_nlocked() : requires node->lock
37 * foo_ilocked() : requires proc->inner_lock
38 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39 * foo_nilocked(): requires node->lock and proc->inner_lock
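 *
 * For example (editor's illustrative sketch, not a function in this
 * driver), code that needs all three locks must take them in the
 * order listed above:
 *
 *   binder_proc_lock(proc);         (1) proc->outer_lock
 *   binder_node_lock(node);         (2) node->lock
 *   binder_inner_proc_lock(proc);   (3) proc->inner_lock
 *   ...
 *   binder_inner_proc_unlock(proc);
 *   binder_node_unlock(node);
 *   binder_proc_unlock(proc);
 */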
43 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
45 #include <linux/fdtable.h>
46 #include <linux/file.h>
47 #include <linux/freezer.h>
49 #include <linux/list.h>
50 #include <linux/miscdevice.h>
51 #include <linux/module.h>
52 #include <linux/mutex.h>
53 #include <linux/nsproxy.h>
54 #include <linux/poll.h>
55 #include <linux/debugfs.h>
56 #include <linux/rbtree.h>
57 #include <linux/sched/signal.h>
58 #include <linux/sched/mm.h>
59 #include <linux/seq_file.h>
60 #include <linux/string.h>
61 #include <linux/uaccess.h>
62 #include <linux/pid_namespace.h>
63 #include <linux/security.h>
64 #include <linux/spinlock.h>
65 #include <linux/ratelimit.h>
66 #include <linux/syscalls.h>
67 #include <linux/task_work.h>
68 #include <linux/sizes.h>
70 #include <uapi/linux/android/binder.h>
71 #include <uapi/linux/android/binderfs.h>
73 #include <asm/cacheflush.h>
75 #include "binder_alloc.h"
76 #include "binder_internal.h"
77 #include "binder_trace.h"
79 static HLIST_HEAD(binder_deferred_list);
80 static DEFINE_MUTEX(binder_deferred_lock);
82 static HLIST_HEAD(binder_devices);
83 static HLIST_HEAD(binder_procs);
84 static DEFINE_MUTEX(binder_procs_lock);
86 static HLIST_HEAD(binder_dead_nodes);
87 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
89 static struct dentry *binder_debugfs_dir_entry_root;
90 static struct dentry *binder_debugfs_dir_entry_proc;
91 static atomic_t binder_last_id;
93 static int proc_show(struct seq_file *m, void *unused);
94 DEFINE_SHOW_ATTRIBUTE(proc);
96 #define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
98 enum {
99 BINDER_DEBUG_USER_ERROR = 1U << 0,
100 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
101 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
102 BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
103 BINDER_DEBUG_DEAD_BINDER = 1U << 4,
104 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
105 BINDER_DEBUG_READ_WRITE = 1U << 6,
106 BINDER_DEBUG_USER_REFS = 1U << 7,
107 BINDER_DEBUG_THREADS = 1U << 8,
108 BINDER_DEBUG_TRANSACTION = 1U << 9,
109 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
110 BINDER_DEBUG_FREE_BUFFER = 1U << 11,
111 BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
112 BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
113 BINDER_DEBUG_SPINLOCKS = 1U << 14,
114 };
115 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
116 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
117 module_param_named(debug_mask, binder_debug_mask, uint, 0644);
119 char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
120 module_param_named(devices, binder_devices_param, charp, 0444);
122 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
123 static int binder_stop_on_user_error;
125 static int binder_set_stop_on_user_error(const char *val,
126 const struct kernel_param *kp)
127 {
128 int ret;
130 ret = param_set_int(val, kp);
131 if (binder_stop_on_user_error < 2)
132 wake_up(&binder_user_error_wait);
133 return ret;
134 }
135 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
136 param_get_int, &binder_stop_on_user_error, 0644);
138 #define binder_debug(mask, x...) \
139 do { \
140 if (binder_debug_mask & mask) \
141 pr_info_ratelimited(x); \
142 } while (0)
144 #define binder_user_error(x...) \
145 do { \
146 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
147 pr_info_ratelimited(x); \
148 if (binder_stop_on_user_error) \
149 binder_stop_on_user_error = 2; \
150 } while (0)
152 #define to_flat_binder_object(hdr) \
153 container_of(hdr, struct flat_binder_object, hdr)
155 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
157 #define to_binder_buffer_object(hdr) \
158 container_of(hdr, struct binder_buffer_object, hdr)
160 #define to_binder_fd_array_object(hdr) \
161 container_of(hdr, struct binder_fd_array_object, hdr)
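/*
 * Editor's example (sketch, not driver code): these helpers recover the
 * enclosing object from a generic header parsed out of a buffer via
 * container_of():
 *
 *	struct binder_object obj;
 *	struct binder_object_header *hdr = &obj.hdr;
 *
 *	if (hdr->type == BINDER_TYPE_FD) {
 *		struct binder_fd_object *fdo = to_binder_fd_object(hdr);
 *		... use fdo->fd ...
 *	}
 */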
163 enum binder_stat_types {
164 BINDER_STAT_PROC,
165 BINDER_STAT_THREAD,
166 BINDER_STAT_NODE,
167 BINDER_STAT_REF,
168 BINDER_STAT_DEATH,
169 BINDER_STAT_TRANSACTION,
170 BINDER_STAT_TRANSACTION_COMPLETE,
171 BINDER_STAT_COUNT
172 };
174 struct binder_stats {
175 atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
176 atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
177 atomic_t obj_created[BINDER_STAT_COUNT];
178 atomic_t obj_deleted[BINDER_STAT_COUNT];
179 };
181 static struct binder_stats binder_stats;
183 static inline void binder_stats_deleted(enum binder_stat_types type)
185 atomic_inc(&binder_stats.obj_deleted[type]);
188 static inline void binder_stats_created(enum binder_stat_types type)
190 atomic_inc(&binder_stats.obj_created[type]);
193 struct binder_transaction_log binder_transaction_log;
194 struct binder_transaction_log binder_transaction_log_failed;
196 static struct binder_transaction_log_entry *binder_transaction_log_add(
197 struct binder_transaction_log *log)
199 struct binder_transaction_log_entry *e;
200 unsigned int cur = atomic_inc_return(&log->cur);
202 if (cur >= ARRAY_SIZE(log->entry))
203 log->full = true;
204 e = &log->entry[cur % ARRAY_SIZE(log->entry)];
205 WRITE_ONCE(e->debug_id_done, 0);
207 * write-barrier to synchronize access to e->debug_id_done.
208 * We make sure the initialized 0 value is seen before
209 * the other fields are zeroed by the memset() below.
210 */
211 smp_wmb();
212 memset(e, 0, sizeof(*e));
213 return e;
214 }
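/*
 * Editor's note (an assumption based on the smp_wmb() above, not a
 * statement of this file's reader code): a consumer of the log is
 * expected to pair with this write barrier roughly as
 *
 *	unsigned int done = READ_ONCE(e->debug_id_done);
 *	smp_rmb();
 *	... copy or print *e, then re-check e->debug_id_done ...
 */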
217 * struct binder_work - work enqueued on a worklist
218 * @entry: node enqueued on list
219 * @type: type of work to be performed
221 * There are separate work lists for proc, thread, and node (async).
223 struct binder_work {
224 struct list_head entry;
226 enum binder_work_type {
227 BINDER_WORK_TRANSACTION = 1,
228 BINDER_WORK_TRANSACTION_COMPLETE,
229 BINDER_WORK_RETURN_ERROR,
230 BINDER_WORK_NODE,
231 BINDER_WORK_DEAD_BINDER,
232 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
233 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
234 } type;
235 };
237 struct binder_error {
238 struct binder_work work;
239 uint32_t cmd;
240 };
243 * struct binder_node - binder node bookkeeping
244 * @debug_id: unique ID for debugging
245 * (invariant after initialized)
246 * @lock: lock for node fields
247 * @work: worklist element for node work
248 * (protected by @proc->inner_lock)
249 * @rb_node: element for proc->nodes tree
250 * (protected by @proc->inner_lock)
251 * @dead_node: element for binder_dead_nodes list
252 * (protected by binder_dead_nodes_lock)
253 * @proc: binder_proc that owns this node
254 * (invariant after initialized)
255 * @refs: list of references on this node
256 * (protected by @lock)
257 * @internal_strong_refs: used to take strong references when
258 * initiating a transaction
259 * (protected by @proc->inner_lock if @proc
260 * and by @lock)
261 * @local_weak_refs: weak user refs from local process
262 * (protected by @proc->inner_lock if @proc
263 * and by @lock)
264 * @local_strong_refs: strong user refs from local process
265 * (protected by @proc->inner_lock if @proc
266 * and by @lock)
267 * @tmp_refs: temporary kernel refs
268 * (protected by @proc->inner_lock while @proc
269 * is valid, and by binder_dead_nodes_lock
270 * if @proc is NULL. During inc/dec and node release
271 * it is also protected by @lock to provide safety
272 * as the node dies and @proc becomes NULL)
273 * @ptr: userspace pointer for node
274 * (invariant, no lock needed)
275 * @cookie: userspace cookie for node
276 * (invariant, no lock needed)
277 * @has_strong_ref: userspace notified of strong ref
278 * (protected by @proc->inner_lock if @proc
279 * and by @lock)
280 * @pending_strong_ref: userspace has acked notification of strong ref
281 * (protected by @proc->inner_lock if @proc
282 * and by @lock)
283 * @has_weak_ref: userspace notified of weak ref
284 * (protected by @proc->inner_lock if @proc
285 * and by @lock)
286 * @pending_weak_ref: userspace has acked notification of weak ref
287 * (protected by @proc->inner_lock if @proc
288 * and by @lock)
289 * @has_async_transaction: async transaction to node in progress
290 * (protected by @lock)
291 * @accept_fds: file descriptor operations supported for node
292 * (invariant after initialized)
293 * @min_priority: minimum scheduling priority
294 * (invariant after initialized)
295 * @txn_security_ctx: require sender's security context
296 * (invariant after initialized)
297 * @async_todo: list of async work items
298 * (protected by @proc->inner_lock)
300 * Bookkeeping structure for binder nodes.
302 struct binder_node {
303 int debug_id;
304 spinlock_t lock;
305 struct binder_work work;
307 struct rb_node rb_node;
308 struct hlist_node dead_node;
310 struct binder_proc *proc;
311 struct hlist_head refs;
312 int internal_strong_refs;
313 int local_weak_refs;
314 int local_strong_refs;
315 int tmp_refs;
316 binder_uintptr_t ptr;
317 binder_uintptr_t cookie;
318 struct {
319 /*
320 * bitfield elements protected by
321 * proc inner_lock
322 */
323 u8 has_strong_ref:1;
324 u8 pending_strong_ref:1;
325 u8 has_weak_ref:1;
326 u8 pending_weak_ref:1;
327 };
328 struct {
329 /*
330 * invariant after initialization
331 */
332 u8 accept_fds:1;
333 u8 txn_security_ctx:1;
334 u8 min_priority;
335 };
336 bool has_async_transaction;
337 struct list_head async_todo;
338 };
340 struct binder_ref_death {
342 * @work: worklist element for death notifications
343 * (protected by inner_lock of the proc that
344 * this ref belongs to)
346 struct binder_work work;
347 binder_uintptr_t cookie;
348 };
351 * struct binder_ref_data - binder_ref counts and id
352 * @debug_id: unique ID for the ref
353 * @desc: unique userspace handle for ref
354 * @strong: strong ref count (debugging only if not locked)
355 * @weak: weak ref count (debugging only if not locked)
357 * Structure to hold ref count and ref id information. Since
358 * the actual ref can only be accessed with a lock, this structure
359 * is used to return information about the ref to callers of
360 * ref inc/dec functions.
362 struct binder_ref_data {
363 int debug_id;
364 uint32_t desc;
365 int strong;
366 int weak;
367 };
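/*
 * Editor's example (sketch; binder_update_ref_for_handle() is defined
 * later in this file, the surrounding flow is hypothetical): callers
 * get a consistent snapshot of the counts without holding the lock
 * themselves:
 *
 *	struct binder_ref_data rdata;
 *
 *	if (!binder_update_ref_for_handle(proc, desc, true, true, &rdata))
 *		pr_info("ref %d strong %d weak %d\n",
 *			rdata.debug_id, rdata.strong, rdata.weak);
 */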
370 * struct binder_ref - struct to track references on nodes
371 * @data: binder_ref_data containing id, handle, and current refcounts
372 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
373 * @rb_node_node: node for lookup by @node in proc's rb_tree
374 * @node_entry: list entry for node->refs list in target node
375 * (protected by @node->lock)
376 * @proc: binder_proc containing ref
377 * @node: binder_node of target node. When cleaning up a
378 * ref for deletion in binder_cleanup_ref, a non-NULL
379 * @node indicates the node must be freed
380 * @death: pointer to death notification (ref_death) if requested
381 * (protected by @node->lock)
383 * Structure to track references from procA to target node (on procB). This
384 * structure is unsafe to access without holding @proc->outer_lock.
386 struct binder_ref {
387 /* Lookups needed: */
388 /* node + proc => ref (transaction) */
389 /* desc + proc => ref (transaction, inc/dec ref) */
390 /* node => refs + procs (proc exit) */
391 struct binder_ref_data data;
392 struct rb_node rb_node_desc;
393 struct rb_node rb_node_node;
394 struct hlist_node node_entry;
395 struct binder_proc *proc;
396 struct binder_node *node;
397 struct binder_ref_death *death;
398 };
400 enum binder_deferred_state {
401 BINDER_DEFERRED_FLUSH = 0x01,
402 BINDER_DEFERRED_RELEASE = 0x02,
403 };
406 * struct binder_proc - binder process bookkeeping
407 * @proc_node: element for binder_procs list
408 * @threads: rbtree of binder_threads in this proc
409 * (protected by @inner_lock)
410 * @nodes: rbtree of binder nodes associated with
411 * this proc ordered by node->ptr
412 * (protected by @inner_lock)
413 * @refs_by_desc: rbtree of refs ordered by ref->desc
414 * (protected by @outer_lock)
415 * @refs_by_node: rbtree of refs ordered by ref->node
416 * (protected by @outer_lock)
417 * @waiting_threads: threads currently waiting for proc work
418 * (protected by @inner_lock)
419 * @pid: PID of group_leader of process
420 * (invariant after initialized)
421 * @tsk: task_struct for group_leader of process
422 * (invariant after initialized)
423 * @cred: struct cred associated with the `struct file`
424 * in binder_open()
425 * (invariant after initialized)
426 * @deferred_work_node: element for binder_deferred_list
427 * (protected by binder_deferred_lock)
428 * @deferred_work: bitmap of deferred work to perform
429 * (protected by binder_deferred_lock)
430 * @is_dead: process is dead and awaiting free
431 * when outstanding transactions are cleaned up
432 * (protected by @inner_lock)
433 * @todo: list of work for this process
434 * (protected by @inner_lock)
435 * @stats: per-process binder statistics
436 * (atomics, no lock needed)
437 * @delivered_death: list of delivered death notifications
438 * (protected by @inner_lock)
439 * @max_threads: cap on number of binder threads
440 * (protected by @inner_lock)
441 * @requested_threads: number of binder threads requested but not
442 * yet started. In current implementation, can
443 * only be 0 or 1.
444 * (protected by @inner_lock)
445 * @requested_threads_started: number of binder threads started
446 * (protected by @inner_lock)
447 * @tmp_ref: temporary reference to indicate proc is in use
448 * (protected by @inner_lock)
449 * @default_priority: default scheduler priority
450 * (invariant after initialized)
451 * @debugfs_entry: debugfs node
452 * @alloc: binder allocator bookkeeping
453 * @context: binder_context for this proc
454 * (invariant after initialized)
455 * @inner_lock: can nest under outer_lock and/or node lock
456 * @outer_lock: no nesting under inner or node lock
457 * Lock order: 1) outer, 2) node, 3) inner
458 * @binderfs_entry: process-specific binderfs log file
460 * Bookkeeping structure for binder processes
462 struct binder_proc {
463 struct hlist_node proc_node;
464 struct rb_root threads;
465 struct rb_root nodes;
466 struct rb_root refs_by_desc;
467 struct rb_root refs_by_node;
468 struct list_head waiting_threads;
469 int pid;
470 struct task_struct *tsk;
471 const struct cred *cred;
472 struct hlist_node deferred_work_node;
473 int deferred_work;
474 bool is_dead;
476 struct list_head todo;
477 struct binder_stats stats;
478 struct list_head delivered_death;
479 int max_threads;
480 int requested_threads;
481 int requested_threads_started;
482 int tmp_ref;
483 long default_priority;
484 struct dentry *debugfs_entry;
485 struct binder_alloc alloc;
486 struct binder_context *context;
487 spinlock_t inner_lock;
488 spinlock_t outer_lock;
489 struct dentry *binderfs_entry;
490 };
492 enum {
493 BINDER_LOOPER_STATE_REGISTERED = 0x01,
494 BINDER_LOOPER_STATE_ENTERED = 0x02,
495 BINDER_LOOPER_STATE_EXITED = 0x04,
496 BINDER_LOOPER_STATE_INVALID = 0x08,
497 BINDER_LOOPER_STATE_WAITING = 0x10,
498 BINDER_LOOPER_STATE_POLL = 0x20,
499 };
502 * struct binder_thread - binder thread bookkeeping
503 * @proc: binder process for this thread
504 * (invariant after initialization)
505 * @rb_node: element for proc->threads rbtree
506 * (protected by @proc->inner_lock)
507 * @waiting_thread_node: element for @proc->waiting_threads list
508 * (protected by @proc->inner_lock)
509 * @pid: PID for this thread
510 * (invariant after initialization)
511 * @looper: bitmap of looping state
512 * (only accessed by this thread)
513 * @looper_need_return: looping thread needs to exit driver
515 * @transaction_stack: stack of in-progress transactions for this thread
516 * (protected by @proc->inner_lock)
517 * @todo: list of work to do for this thread
518 * (protected by @proc->inner_lock)
519 * @process_todo: whether work in @todo should be processed
520 * (protected by @proc->inner_lock)
521 * @return_error: transaction errors reported by this thread
522 * (only accessed by this thread)
523 * @reply_error: transaction errors reported by target thread
524 * (protected by @proc->inner_lock)
525 * @wait: wait queue for thread work
526 * @stats: per-thread statistics
527 * (atomics, no lock needed)
528 * @tmp_ref: temporary reference to indicate thread is in use
529 * (atomic since @proc->inner_lock cannot
530 * always be acquired)
531 * @is_dead: thread is dead and awaiting free
532 * when outstanding transactions are cleaned up
533 * (protected by @proc->inner_lock)
535 * Bookkeeping structure for binder threads.
537 struct binder_thread {
538 struct binder_proc *proc;
539 struct rb_node rb_node;
540 struct list_head waiting_thread_node;
541 int pid;
542 int looper; /* only modified by this thread */
543 bool looper_need_return; /* can be written by other thread */
544 struct binder_transaction *transaction_stack;
545 struct list_head todo;
546 bool process_todo;
547 struct binder_error return_error;
548 struct binder_error reply_error;
549 wait_queue_head_t wait;
550 struct binder_stats stats;
551 atomic_t tmp_ref;
552 bool is_dead;
553 };
556 * struct binder_txn_fd_fixup - transaction fd fixup list element
557 * @fixup_entry: list entry
558 * @file: struct file to be associated with new fd
559 * @offset: offset in buffer data to this fixup
561 * List element for fd fixups in a transaction. Since file
562 * descriptors need to be allocated in the context of the
563 * target process, we pass each fd to be processed in this
564 * struct.
566 struct binder_txn_fd_fixup {
567 struct list_head fixup_entry;
568 struct file *file;
569 binder_size_t offset;
570 };
572 struct binder_transaction {
573 int debug_id;
574 struct binder_work work;
575 struct binder_thread *from;
576 struct binder_transaction *from_parent;
577 struct binder_proc *to_proc;
578 struct binder_thread *to_thread;
579 struct binder_transaction *to_parent;
580 unsigned need_reply:1;
581 /* unsigned is_dead:1; */ /* not used at the moment */
583 struct binder_buffer *buffer;
584 unsigned int code;
585 unsigned int flags;
586 long priority;
587 long saved_priority;
588 kuid_t sender_euid;
589 struct list_head fd_fixups;
590 binder_uintptr_t security_ctx;
592 * @lock: protects @from, @to_proc, and @to_thread
594 * @from, @to_proc, and @to_thread can be set to NULL
595 * during thread teardown
596 */
597 spinlock_t lock;
598 };
601 * struct binder_object - union of flat binder object types
602 * @hdr: generic object header
603 * @fbo: binder object (nodes and refs)
604 * @fdo: file descriptor object
605 * @bbo: binder buffer pointer
606 * @fdao: file descriptor array
608 * Used for type-independent object copies
610 struct binder_object {
611 union {
612 struct binder_object_header hdr;
613 struct flat_binder_object fbo;
614 struct binder_fd_object fdo;
615 struct binder_buffer_object bbo;
616 struct binder_fd_array_object fdao;
617 };
618 };
621 * binder_proc_lock() - Acquire outer lock for given binder_proc
622 * @proc: struct binder_proc to acquire
624 * Acquires proc->outer_lock. Used to protect binder_ref
625 * structures associated with the given proc.
627 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
629 _binder_proc_lock(struct binder_proc *proc, int line)
630 __acquires(&proc->outer_lock)
632 binder_debug(BINDER_DEBUG_SPINLOCKS,
633 "%s: line=%d\n", __func__, line);
634 spin_lock(&proc->outer_lock);
638 * binder_proc_unlock() - Release spinlock for given binder_proc
639 * @proc: struct binder_proc whose outer lock to release
641 * Release lock acquired via binder_proc_lock()
643 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
645 _binder_proc_unlock(struct binder_proc *proc, int line)
646 __releases(&proc->outer_lock)
648 binder_debug(BINDER_DEBUG_SPINLOCKS,
649 "%s: line=%d\n", __func__, line);
650 spin_unlock(&proc->outer_lock);
654 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
655 * @proc: struct binder_proc to acquire
657 * Acquires proc->inner_lock. Used to protect todo lists
659 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
661 _binder_inner_proc_lock(struct binder_proc *proc, int line)
662 __acquires(&proc->inner_lock)
664 binder_debug(BINDER_DEBUG_SPINLOCKS,
665 "%s: line=%d\n", __func__, line);
666 spin_lock(&proc->inner_lock);
670 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
671 * @proc: struct binder_proc whose inner lock to release
673 * Release lock acquired via binder_inner_proc_lock()
675 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
677 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
678 __releases(&proc->inner_lock)
680 binder_debug(BINDER_DEBUG_SPINLOCKS,
681 "%s: line=%d\n", __func__, line);
682 spin_unlock(&proc->inner_lock);
686 * binder_node_lock() - Acquire spinlock for given binder_node
687 * @node: struct binder_node to acquire
689 * Acquires node->lock. Used to protect binder_node fields
691 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
693 _binder_node_lock(struct binder_node *node, int line)
694 __acquires(&node->lock)
696 binder_debug(BINDER_DEBUG_SPINLOCKS,
697 "%s: line=%d\n", __func__, line);
698 spin_lock(&node->lock);
702 * binder_node_unlock() - Release spinlock for given binder_node
703 * @node: struct binder_node whose lock to release
705 * Release lock acquired via binder_node_lock()
707 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
709 _binder_node_unlock(struct binder_node *node, int line)
710 __releases(&node->lock)
712 binder_debug(BINDER_DEBUG_SPINLOCKS,
713 "%s: line=%d\n", __func__, line);
714 spin_unlock(&node->lock);
718 * binder_node_inner_lock() - Acquire node and inner locks
719 * @node: struct binder_node to acquire
721 * Acquires node->lock. If node->proc is non-NULL, also acquires
722 * node->proc->inner_lock. Used to protect binder_node fields
724 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
726 _binder_node_inner_lock(struct binder_node *node, int line)
727 __acquires(&node->lock) __acquires(&node->proc->inner_lock)
729 binder_debug(BINDER_DEBUG_SPINLOCKS,
730 "%s: line=%d\n", __func__, line);
731 spin_lock(&node->lock);
732 if (node->proc)
733 binder_inner_proc_lock(node->proc);
734 else
735 /* annotation for sparse */
736 __acquire(&node->proc->inner_lock);
737 }
740 * binder_node_inner_unlock() - Release node and inner locks
741 * @node: struct binder_node whose locks to release
743 * Release locks acquired via binder_node_inner_lock()
745 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
747 _binder_node_inner_unlock(struct binder_node *node, int line)
748 __releases(&node->lock) __releases(&node->proc->inner_lock)
750 struct binder_proc *proc = node->proc;
752 binder_debug(BINDER_DEBUG_SPINLOCKS,
753 "%s: line=%d\n", __func__, line);
754 if (proc)
755 binder_inner_proc_unlock(proc);
756 else
757 /* annotation for sparse */
758 __release(&node->proc->inner_lock);
759 spin_unlock(&node->lock);
762 static bool binder_worklist_empty_ilocked(struct list_head *list)
764 return list_empty(list);
768 * binder_worklist_empty() - Check if no items on the work list
769 * @proc: binder_proc associated with list
770 * @list: list to check
772 * Return: true if there are no items on list, else false
774 static bool binder_worklist_empty(struct binder_proc *proc,
775 struct list_head *list)
777 bool ret;
779 binder_inner_proc_lock(proc);
780 ret = binder_worklist_empty_ilocked(list);
781 binder_inner_proc_unlock(proc);
782 return ret;
783 }
786 * binder_enqueue_work_ilocked() - Add an item to the work list
787 * @work: struct binder_work to add to list
788 * @target_list: list to add work to
790 * Adds the work to the specified list. Asserts that work
791 * is not already on a list.
793 * Requires the proc->inner_lock to be held.
796 binder_enqueue_work_ilocked(struct binder_work *work,
797 struct list_head *target_list)
799 BUG_ON(target_list == NULL);
800 BUG_ON(work->entry.next && !list_empty(&work->entry));
801 list_add_tail(&work->entry, target_list);
805 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
806 * @thread: thread to queue work to
807 * @work: struct binder_work to add to list
809 * Adds the work to the todo list of the thread. Doesn't set the process_todo
810 * flag, which means that (if it wasn't already set) the thread will go to
811 * sleep without handling this work when it calls read.
813 * Requires the proc->inner_lock to be held.
816 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
817 struct binder_work *work)
819 WARN_ON(!list_empty(&thread->waiting_thread_node));
820 binder_enqueue_work_ilocked(work, &thread->todo);
824 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
825 * @thread: thread to queue work to
826 * @work: struct binder_work to add to list
828 * Adds the work to the todo list of the thread, and enables processing
829 * of this work.
831 * Requires the proc->inner_lock to be held.
834 binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
835 struct binder_work *work)
837 WARN_ON(!list_empty(&thread->waiting_thread_node));
838 binder_enqueue_work_ilocked(work, &thread->todo);
840 /* (e)poll-based threads require an explicit wakeup signal when
841 * queuing their own work; they rely on these events to consume
842 * work without blocking on I/O. Without the wakeup, such threads
843 * risk waiting indefinitely without handling the work.
844 */
845 if (thread->looper & BINDER_LOOPER_STATE_POLL &&
846 thread->pid == current->pid && !thread->process_todo)
847 wake_up_interruptible_sync(&thread->wait);
849 thread->process_todo = true;
853 * binder_enqueue_thread_work() - Add an item to the thread work list
854 * @thread: thread to queue work to
855 * @work: struct binder_work to add to list
857 * Adds the work to the todo list of the thread, and enables processing
858 * of this work.
861 binder_enqueue_thread_work(struct binder_thread *thread,
862 struct binder_work *work)
864 binder_inner_proc_lock(thread->proc);
865 binder_enqueue_thread_work_ilocked(thread, work);
866 binder_inner_proc_unlock(thread->proc);
867 }
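/*
 * Editor's example (sketch of a typical use, assuming the error-reply
 * flow elsewhere in this driver): a thread queues its own return_error
 * work through this wrapper:
 *
 *	thread->return_error.cmd = BR_FAILED_REPLY;
 *	binder_enqueue_thread_work(thread, &thread->return_error.work);
 */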
870 binder_dequeue_work_ilocked(struct binder_work *work)
872 list_del_init(&work->entry);
876 * binder_dequeue_work() - Removes an item from the work list
877 * @proc: binder_proc associated with list
878 * @work: struct binder_work to remove from list
880 * Removes the specified work item from whatever list it is on.
881 * Can safely be called if work is not on any list.
884 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
886 binder_inner_proc_lock(proc);
887 binder_dequeue_work_ilocked(work);
888 binder_inner_proc_unlock(proc);
891 static struct binder_work *binder_dequeue_work_head_ilocked(
892 struct list_head *list)
894 struct binder_work *w;
896 w = list_first_entry_or_null(list, struct binder_work, entry);
897 if (w)
898 list_del_init(&w->entry);
900 return w;
901 }
902 static void
903 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
904 static void binder_free_thread(struct binder_thread *thread);
905 static void binder_free_proc(struct binder_proc *proc);
906 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
908 static bool binder_has_work_ilocked(struct binder_thread *thread,
909 bool do_proc_work)
911 return thread->process_todo ||
912 thread->looper_need_return ||
913 (do_proc_work &&
914 !binder_worklist_empty_ilocked(&thread->proc->todo));
917 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
919 bool has_work;
921 binder_inner_proc_lock(thread->proc);
922 has_work = binder_has_work_ilocked(thread, do_proc_work);
923 binder_inner_proc_unlock(thread->proc);
925 return has_work;
926 }
928 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
930 return !thread->transaction_stack &&
931 binder_worklist_empty_ilocked(&thread->todo) &&
932 (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
933 BINDER_LOOPER_STATE_REGISTERED));
936 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
940 struct binder_thread *thread;
942 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
943 thread = rb_entry(n, struct binder_thread, rb_node);
944 if (thread->looper & BINDER_LOOPER_STATE_POLL &&
945 binder_available_for_proc_work_ilocked(thread)) {
946 if (sync)
947 wake_up_interruptible_sync(&thread->wait);
948 else
949 wake_up_interruptible(&thread->wait);
950 }
951 }
952 }
955 * binder_select_thread_ilocked() - selects a thread for doing proc work.
956 * @proc: process to select a thread from
958 * Note that calling this function moves the thread off the waiting_threads
959 * list, so it can only be woken up by the caller of this function, or a
960 * signal. Therefore, callers *should* always wake up the thread this function
961 * returns.
963 * Return: If there's a thread currently waiting for process work,
964 * returns that thread. Otherwise returns NULL.
966 static struct binder_thread *
967 binder_select_thread_ilocked(struct binder_proc *proc)
969 struct binder_thread *thread;
971 assert_spin_locked(&proc->inner_lock);
972 thread = list_first_entry_or_null(&proc->waiting_threads,
973 struct binder_thread,
974 waiting_thread_node);
976 if (thread)
977 list_del_init(&thread->waiting_thread_node);
979 return thread;
980 }
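/*
 * Editor's note (sketch of the calling pattern described above):
 * select first, then always wake what was selected:
 *
 *	struct binder_thread *thread = binder_select_thread_ilocked(proc);
 *
 *	binder_wakeup_thread_ilocked(proc, thread, false);
 *
 * binder_wakeup_proc_ilocked() below is exactly this pairing.
 */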
983 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
984 * @proc: process to wake up a thread in
985 * @thread: specific thread to wake-up (may be NULL)
986 * @sync: whether to do a synchronous wake-up
988 * This function wakes up a thread in the @proc process.
989 * The caller may provide a specific thread to wake-up in
990 * the @thread parameter. If @thread is NULL, this function
991 * will wake up threads that have called poll().
993 * Note that for this function to work as expected, callers
994 * should first call binder_select_thread() to find a thread
995 * to handle the work (if they don't have a thread already),
996 * and pass the result into the @thread parameter.
998 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
999 struct binder_thread *thread,
1002 assert_spin_locked(&proc->inner_lock);
1004 if (thread) {
1005 if (sync)
1006 wake_up_interruptible_sync(&thread->wait);
1007 else
1008 wake_up_interruptible(&thread->wait);
1009 return;
1010 }
1012 /* Didn't find a thread waiting for proc work; this can happen
1013 * in two scenarios:
1014 * 1. All threads are busy handling transactions
1015 * In that case, one of those threads should call back into
1016 * the kernel driver soon and pick up this work.
1017 * 2. Threads are using the (e)poll interface, in which case
1018 * they may be blocked on the waitqueue without having been
1019 * added to waiting_threads. For this case, we just iterate
1020 * over all threads not handling transaction work, and
1021 * wake them all up. We wake all because we don't know whether
1022 * a thread that called into (e)poll is handling non-binder
1023 * work or not.
1024 */
1025 binder_wakeup_poll_threads_ilocked(proc, sync);
1026 }
1028 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
1030 struct binder_thread *thread = binder_select_thread_ilocked(proc);
1032 binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
1033 }
1035 static void binder_set_nice(long nice)
1037 long min_nice;
1039 if (can_nice(current, nice)) {
1040 set_user_nice(current, nice);
1041 return;
1042 }
1043 min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
1044 binder_debug(BINDER_DEBUG_PRIORITY_CAP,
1045 "%d: nice value %ld not allowed use %ld instead\n",
1046 current->pid, nice, min_nice);
1047 set_user_nice(current, min_nice);
1048 if (min_nice <= MAX_NICE)
1049 return;
1050 binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
1051 }
1053 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
1054 binder_uintptr_t ptr)
1056 struct rb_node *n = proc->nodes.rb_node;
1057 struct binder_node *node;
1059 assert_spin_locked(&proc->inner_lock);
1061 while (n) {
1062 node = rb_entry(n, struct binder_node, rb_node);
1064 if (ptr < node->ptr)
1065 n = n->rb_left;
1066 else if (ptr > node->ptr)
1067 n = n->rb_right;
1068 else {
1069 /*
1070 * take an implicit weak reference
1071 * to ensure node stays alive until
1072 * call to binder_put_node()
1073 */
1074 binder_inc_node_tmpref_ilocked(node);
1075 return node;
1076 }
1077 }
1079 return NULL;
1080 }
1081 static struct binder_node *binder_get_node(struct binder_proc *proc,
1082 binder_uintptr_t ptr)
1084 struct binder_node *node;
1086 binder_inner_proc_lock(proc);
1087 node = binder_get_node_ilocked(proc, ptr);
1088 binder_inner_proc_unlock(proc);
1090 return node;
1091 }
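/*
 * Editor's example (sketch): binder_get_node() returns the node with a
 * temporary reference held, which the caller must balance:
 *
 *	struct binder_node *node = binder_get_node(proc, ptr);
 *
 *	if (node) {
 *		... use node ...
 *		binder_put_node(node);
 *	}
 */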
1092 static struct binder_node *binder_init_node_ilocked(
1093 struct binder_proc *proc,
1094 struct binder_node *new_node,
1095 struct flat_binder_object *fp)
1097 struct rb_node **p = &proc->nodes.rb_node;
1098 struct rb_node *parent = NULL;
1099 struct binder_node *node;
1100 binder_uintptr_t ptr = fp ? fp->binder : 0;
1101 binder_uintptr_t cookie = fp ? fp->cookie : 0;
1102 __u32 flags = fp ? fp->flags : 0;
1104 assert_spin_locked(&proc->inner_lock);
1106 while (*p) {
1108 parent = *p;
1109 node = rb_entry(parent, struct binder_node, rb_node);
1111 if (ptr < node->ptr)
1112 p = &(*p)->rb_left;
1113 else if (ptr > node->ptr)
1114 p = &(*p)->rb_right;
1115 else {
1116 /*
1117 * A matching node is already in
1118 * the rb tree. Abandon the init
1119 * and return it.
1120 */
1121 binder_inc_node_tmpref_ilocked(node);
1122 return node;
1123 }
1124 }
1125 node = new_node;
1126 binder_stats_created(BINDER_STAT_NODE);
1127 node->tmp_refs++;
1128 rb_link_node(&node->rb_node, parent, p);
1129 rb_insert_color(&node->rb_node, &proc->nodes);
1130 node->debug_id = atomic_inc_return(&binder_last_id);
1131 node->proc = proc;
1132 node->ptr = ptr;
1133 node->cookie = cookie;
1134 node->work.type = BINDER_WORK_NODE;
1135 node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1136 node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1137 node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
1138 spin_lock_init(&node->lock);
1139 INIT_LIST_HEAD(&node->work.entry);
1140 INIT_LIST_HEAD(&node->async_todo);
1141 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1142 "%d:%d node %d u%016llx c%016llx created\n",
1143 proc->pid, current->pid, node->debug_id,
1144 (u64)node->ptr, (u64)node->cookie);
1146 return node;
1147 }
1149 static struct binder_node *binder_new_node(struct binder_proc *proc,
1150 struct flat_binder_object *fp)
1152 struct binder_node *node;
1153 struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
1155 if (!new_node)
1156 return NULL;
1157 binder_inner_proc_lock(proc);
1158 node = binder_init_node_ilocked(proc, new_node, fp);
1159 binder_inner_proc_unlock(proc);
1160 if (node != new_node)
1161 /*
1162 * The node was already added by another thread
1163 */
1164 kfree(new_node);
1166 return node;
1167 }
1169 static void binder_free_node(struct binder_node *node)
1171 kfree(node);
1172 binder_stats_deleted(BINDER_STAT_NODE);
1173 }
1175 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1177 struct list_head *target_list)
1179 struct binder_proc *proc = node->proc;
1181 assert_spin_locked(&node->lock);
1182 if (proc)
1183 assert_spin_locked(&proc->inner_lock);
1184 if (strong) {
1185 if (internal) {
1186 if (target_list == NULL &&
1187 node->internal_strong_refs == 0 &&
1188 !(node->proc &&
1189 node == node->proc->context->binder_context_mgr_node &&
1190 node->has_strong_ref)) {
1191 pr_err("invalid inc strong node for %d\n",
1192 node->debug_id);
1193 return -EINVAL;
1194 }
1195 node->internal_strong_refs++;
1196 } else
1197 node->local_strong_refs++;
1198 if (!node->has_strong_ref && target_list) {
1199 struct binder_thread *thread = container_of(target_list,
1200 struct binder_thread, todo);
1201 binder_dequeue_work_ilocked(&node->work);
1202 BUG_ON(&thread->todo != target_list);
1203 binder_enqueue_deferred_thread_work_ilocked(thread,
1204 &node->work);
1205 }
1206 } else {
1207 if (!internal)
1208 node->local_weak_refs++;
1209 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1210 if (target_list == NULL) {
1211 pr_err("invalid inc weak node for %d\n",
1212 node->debug_id);
1213 return -EINVAL;
1214 }
1218 binder_enqueue_work_ilocked(&node->work, target_list);
1219 }
1220 }
1221 return 0;
1222 }
1224 static int binder_inc_node(struct binder_node *node, int strong, int internal,
1225 struct list_head *target_list)
1227 int ret;
1229 binder_node_inner_lock(node);
1230 ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1231 binder_node_inner_unlock(node);
1233 return ret;
1234 }
1236 static bool binder_dec_node_nilocked(struct binder_node *node,
1237 int strong, int internal)
1239 struct binder_proc *proc = node->proc;
1241 assert_spin_locked(&node->lock);
1242 if (proc)
1243 assert_spin_locked(&proc->inner_lock);
1244 if (strong) {
1245 if (internal)
1246 node->internal_strong_refs--;
1247 else
1248 node->local_strong_refs--;
1249 if (node->local_strong_refs || node->internal_strong_refs)
1250 return false;
1251 } else {
1252 if (!internal)
1253 node->local_weak_refs--;
1254 if (node->local_weak_refs || node->tmp_refs ||
1255 !hlist_empty(&node->refs))
1256 return false;
1257 }
1259 if (proc && (node->has_strong_ref || node->has_weak_ref)) {
1260 if (list_empty(&node->work.entry)) {
1261 binder_enqueue_work_ilocked(&node->work, &proc->todo);
1262 binder_wakeup_proc_ilocked(proc);
1263 }
1264 } else {
1265 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1266 !node->local_weak_refs && !node->tmp_refs) {
1267 if (proc) {
1268 binder_dequeue_work_ilocked(&node->work);
1269 rb_erase(&node->rb_node, &proc->nodes);
1270 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1271 "refless node %d deleted\n",
1272 node->debug_id);
1273 } else {
1274 BUG_ON(!list_empty(&node->work.entry));
1275 spin_lock(&binder_dead_nodes_lock);
1276 /*
1277 * tmp_refs could have changed so
1278 * check it again
1279 */
1280 if (node->tmp_refs) {
1281 spin_unlock(&binder_dead_nodes_lock);
1282 return false;
1283 }
1284 hlist_del(&node->dead_node);
1285 spin_unlock(&binder_dead_nodes_lock);
1286 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1287 "dead node %d deleted\n",
1288 node->debug_id);
1289 }
1290 return true;
1291 }
1292 }
1293 return false;
1296 static void binder_dec_node(struct binder_node *node, int strong, int internal)
1298 bool free_node;
1300 binder_node_inner_lock(node);
1301 free_node = binder_dec_node_nilocked(node, strong, internal);
1302 binder_node_inner_unlock(node);
1303 if (free_node)
1304 binder_free_node(node);
1305 }
1307 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
1310 * No call to binder_inc_node() is needed since we
1311 * don't need to inform userspace of any changes to
1312 * tmp_refs
1313 */
1314 node->tmp_refs++;
1315 }
1318 * binder_inc_node_tmpref() - take a temporary reference on node
1319 * @node: node to reference
1321 * Take reference on node to prevent the node from being freed
1322 * while referenced only by a local variable. The inner lock is
1323 * needed to serialize with the node work on the queue (which
1324 * isn't needed after the node is dead). If the node is dead
1325 * (node->proc is NULL), use binder_dead_nodes_lock to protect
1326 * node->tmp_refs against dead-node-only cases where the node
1327 * lock cannot be acquired (e.g. traversing the dead node list to
1328 * print nodes)
1330 static void binder_inc_node_tmpref(struct binder_node *node)
1332 binder_node_lock(node);
1333 if (node->proc)
1334 binder_inner_proc_lock(node->proc);
1335 else
1336 spin_lock(&binder_dead_nodes_lock);
1337 binder_inc_node_tmpref_ilocked(node);
1338 if (node->proc)
1339 binder_inner_proc_unlock(node->proc);
1340 else
1341 spin_unlock(&binder_dead_nodes_lock);
1342 binder_node_unlock(node);
1346 * binder_dec_node_tmpref() - remove a temporary reference on node
1347 * @node: node to reference
1349 * Release temporary reference on node taken via binder_inc_node_tmpref()
1351 static void binder_dec_node_tmpref(struct binder_node *node)
1353 bool free_node;
1355 binder_node_inner_lock(node);
1356 if (!node->proc)
1357 spin_lock(&binder_dead_nodes_lock);
1358 else
1359 __acquire(&binder_dead_nodes_lock);
1360 node->tmp_refs--;
1361 BUG_ON(node->tmp_refs < 0);
1362 if (!node->proc)
1363 spin_unlock(&binder_dead_nodes_lock);
1364 else
1365 __release(&binder_dead_nodes_lock);
1367 * Call binder_dec_node() to check if all refcounts are 0
1368 * and cleanup is needed. Calling with strong=0 and internal=1
1369 * causes no actual reference to be released in binder_dec_node().
1370 * If that changes, a change is needed here too.
1372 free_node = binder_dec_node_nilocked(node, 0, 1);
1373 binder_node_inner_unlock(node);
1374 if (free_node)
1375 binder_free_node(node);
1376 }
1378 static void binder_put_node(struct binder_node *node)
1380 binder_dec_node_tmpref(node);
1383 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1384 u32 desc, bool need_strong_ref)
1386 struct rb_node *n = proc->refs_by_desc.rb_node;
1387 struct binder_ref *ref;
1389 while (n) {
1390 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1392 if (desc < ref->data.desc) {
1393 n = n->rb_left;
1394 } else if (desc > ref->data.desc) {
1395 n = n->rb_right;
1396 } else if (need_strong_ref && !ref->data.strong) {
1397 binder_user_error("tried to use weak ref as strong ref\n");
1398 return NULL;
1399 } else {
1400 return ref;
1401 }
1402 }
1404 return NULL;
1405 }
1407 * binder_get_ref_for_node_olocked() - get the ref associated with given node
1408 * @proc: binder_proc that owns the ref
1409 * @node: binder_node of target
1410 * @new_ref: newly allocated binder_ref to be initialized or %NULL
1412 * Look up the ref for the given node and return it if it exists
1414 * If it doesn't exist and the caller provides a newly allocated
1415 * ref, initialize the fields of the newly allocated ref and insert
1416 * into the given proc rb_trees and node refs list.
1418 * Return: the ref for node. It is possible that another thread
1419 * allocated/initialized the ref first in which case the
1420 * returned ref would be different than the passed-in
1421 * new_ref. new_ref must be kfree'd by the caller in
1422 * that case
1424 static struct binder_ref *binder_get_ref_for_node_olocked(
1425 struct binder_proc *proc,
1426 struct binder_node *node,
1427 struct binder_ref *new_ref)
1429 struct binder_context *context = proc->context;
1430 struct rb_node **p = &proc->refs_by_node.rb_node;
1431 struct rb_node *parent = NULL;
1432 struct binder_ref *ref;
1433 struct rb_node *n;
1435 while (*p) {
1436 parent = *p;
1437 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1439 if (node < ref->node)
1440 p = &(*p)->rb_left;
1441 else if (node > ref->node)
1442 p = &(*p)->rb_right;
1443 else
1444 return ref;
1445 }
1446 if (!new_ref)
1447 return NULL;
1449 binder_stats_created(BINDER_STAT_REF);
1450 new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1451 new_ref->proc = proc;
1452 new_ref->node = node;
1453 rb_link_node(&new_ref->rb_node_node, parent, p);
1454 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1456 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1457 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1458 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1459 if (ref->data.desc > new_ref->data.desc)
1460 break;
1461 new_ref->data.desc = ref->data.desc + 1;
1462 }
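/*
 * Editor's note: the loop above assigns the lowest unused descriptor
 * (handle). Worked example with hypothetical numbers: if this proc
 * already holds descs {0, 1, 2, 4}, the scan advances desc to 3 and
 * breaks at 4 (4 > 3), so the new ref gets desc 3; with {0, 1, 2, 3}
 * it would get desc 4.
 */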
1464 p = &proc->refs_by_desc.rb_node;
1465 while (*p) {
1466 parent = *p;
1467 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1469 if (new_ref->data.desc < ref->data.desc)
1470 p = &(*p)->rb_left;
1471 else if (new_ref->data.desc > ref->data.desc)
1472 p = &(*p)->rb_right;
1473 else
1474 BUG();
1475 }
1476 rb_link_node(&new_ref->rb_node_desc, parent, p);
1477 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1479 binder_node_lock(node);
1480 hlist_add_head(&new_ref->node_entry, &node->refs);
1482 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1483 "%d new ref %d desc %d for node %d\n",
1484 proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1485 node->debug_id);
1486 binder_node_unlock(node);
1487 return new_ref;
1488 }
1490 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1492 bool delete_node = false;
1494 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1495 "%d delete ref %d desc %d for node %d\n",
1496 ref->proc->pid, ref->data.debug_id, ref->data.desc,
1497 ref->node->debug_id);
1499 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1500 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1502 binder_node_inner_lock(ref->node);
1503 if (ref->data.strong)
1504 binder_dec_node_nilocked(ref->node, 1, 1);
1506 hlist_del(&ref->node_entry);
1507 delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1508 binder_node_inner_unlock(ref->node);
1510 * Clear ref->node unless we want the caller to free the node
1511 */
1512 if (!delete_node) {
1513 /*
1514 * The caller uses ref->node to determine
1515 * whether the node needs to be freed. Clear
1516 * it since the node is still alive.
1517 */
1518 ref->node = NULL;
1519 }
1521 if (ref->death) {
1522 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1523 "%d delete ref %d desc %d has death notification\n",
1524 ref->proc->pid, ref->data.debug_id,
1525 ref->data.desc);
1526 binder_dequeue_work(ref->proc, &ref->death->work);
1527 binder_stats_deleted(BINDER_STAT_DEATH);
1528 }
1529 binder_stats_deleted(BINDER_STAT_REF);
1533 * binder_inc_ref_olocked() - increment the ref for given handle
1534 * @ref: ref to be incremented
1535 * @strong: if true, strong increment, else weak
1536 * @target_list: list to queue node work on
1538 * Increment the ref. @ref->proc->outer_lock must be held on entry
1540 * Return: 0, if successful, else errno
1542 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1543 struct list_head *target_list)
1544 {
1545 int ret;
1547 if (strong) {
1548 if (ref->data.strong == 0) {
1549 ret = binder_inc_node(ref->node, 1, 1, target_list);
1550 if (ret)
1551 return ret;
1552 }
1553 ref->data.strong++;
1554 } else {
1555 if (ref->data.weak == 0) {
1556 ret = binder_inc_node(ref->node, 0, 1, target_list);
1557 if (ret)
1558 return ret;
1559 }
1560 ref->data.weak++;
1561 }
1562 return 0;
1563 }
1566 * binder_dec_ref_olocked() - dec the ref for given handle
1567 * @ref: ref to be decremented
1568 * @strong: if true, strong decrement, else weak
1570 * Decrement the ref.
1572 * Return: true if ref is cleaned up and ready to be freed
1574 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1575 {
1576 if (strong) {
1577 if (ref->data.strong == 0) {
1578 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1579 ref->proc->pid, ref->data.debug_id,
1580 ref->data.desc, ref->data.strong,
1581 ref->data.weak);
1582 return false;
1583 }
1584 ref->data.strong--;
1585 if (ref->data.strong == 0)
1586 binder_dec_node(ref->node, strong, 1);
1587 } else {
1588 if (ref->data.weak == 0) {
1589 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1590 ref->proc->pid, ref->data.debug_id,
1591 ref->data.desc, ref->data.strong,
1592 ref->data.weak);
1593 return false;
1594 }
1595 ref->data.weak--;
1596 }
1597 if (ref->data.strong == 0 && ref->data.weak == 0) {
1598 binder_cleanup_ref_olocked(ref);
1599 return true;
1600 }
1601 return false;
1602 }
1605 * binder_get_node_from_ref() - get the node from the given proc/desc
1606 * @proc: proc containing the ref
1607 * @desc: the handle associated with the ref
1608 * @need_strong_ref: if true, only return node if ref is strong
1609 * @rdata: the id/refcount data for the ref
1611 * Given a proc and ref handle, return the associated binder_node
1613 * Return: a binder_node or NULL if not found or not strong when strong required
1615 static struct binder_node *binder_get_node_from_ref(
1616 struct binder_proc *proc,
1617 u32 desc, bool need_strong_ref,
1618 struct binder_ref_data *rdata)
1620 struct binder_node *node;
1621 struct binder_ref *ref;
1623 binder_proc_lock(proc);
1624 ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1625 if (ref) {
1626 node = ref->node;
1628 /*
1629 * Take an implicit reference on the node to ensure
1630 * it stays alive until the call to binder_put_node()
1631 */
1632 binder_inc_node_tmpref(node);
1633 if (rdata)
1634 *rdata = ref->data;
1635 binder_proc_unlock(proc);
1637 return node;
1638 }
1640 binder_proc_unlock(proc);
1642 return NULL;
1643 }
1645 * binder_free_ref() - free the binder_ref
1648 * Free the binder_ref. Free the binder_node indicated by ref->node
1649 * (if non-NULL) and the binder_ref_death indicated by ref->death.
1651 static void binder_free_ref(struct binder_ref *ref)
1653 if (ref->node)
1654 binder_free_node(ref->node);
1655 kfree(ref->death);
1656 kfree(ref);
1657 }
1660 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1661 * @proc: proc containing the ref
1662 * @desc: the handle associated with the ref
1663 * @increment: true=inc reference, false=dec reference
1664 * @strong: true=strong reference, false=weak reference
1665 * @rdata: the id/refcount data for the ref
1667 * Given a proc and ref handle, increment or decrement the ref
1668 * according to "increment" arg.
1670 * Return: 0 if successful, else errno
1672 static int binder_update_ref_for_handle(struct binder_proc *proc,
1673 uint32_t desc, bool increment, bool strong,
1674 struct binder_ref_data *rdata)
1676 int ret = 0;
1677 struct binder_ref *ref;
1678 bool delete_ref = false;
1680 binder_proc_lock(proc);
1681 ref = binder_get_ref_olocked(proc, desc, strong);
1682 if (!ref) {
1683 ret = -EINVAL;
1684 goto err_no_ref;
1685 }
1686 if (increment)
1687 ret = binder_inc_ref_olocked(ref, strong, NULL);
1688 else
1689 delete_ref = binder_dec_ref_olocked(ref, strong);
1691 if (rdata)
1692 *rdata = ref->data;
1693 binder_proc_unlock(proc);
1695 if (delete_ref)
1696 binder_free_ref(ref);
1697 return ret;
1699 err_no_ref:
1700 binder_proc_unlock(proc);
1701 return ret;
1702 }
1705 * binder_dec_ref_for_handle() - dec the ref for given handle
1706 * @proc: proc containing the ref
1707 * @desc: the handle associated with the ref
1708 * @strong: true=strong reference, false=weak reference
1709 * @rdata: the id/refcount data for the ref
1711 * Just calls binder_update_ref_for_handle() to decrement the ref.
1713 * Return: 0 if successful, else errno
1715 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1716 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1718 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1723 * binder_inc_ref_for_node() - increment the ref for given proc/node
1724 * @proc: proc containing the ref
1725 * @node: target node
1726 * @strong: true=strong reference, false=weak reference
1727 * @target_list: worklist to use if node is incremented
1728 * @rdata: the id/refcount data for the ref
1730 * Given a proc and node, increment the ref. Create the ref if it
1731 * doesn't already exist
1733 * Return: 0 if successful, else errno
1735 static int binder_inc_ref_for_node(struct binder_proc *proc,
1736 struct binder_node *node,
1737 bool strong,
1738 struct list_head *target_list,
1739 struct binder_ref_data *rdata)
1741 struct binder_ref *ref;
1742 struct binder_ref *new_ref = NULL;
1743 int ret;
1745 binder_proc_lock(proc);
1746 ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1747 if (!ref) {
1748 binder_proc_unlock(proc);
1749 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1750 if (!new_ref)
1751 return -ENOMEM;
1752 binder_proc_lock(proc);
1753 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1754 }
1755 ret = binder_inc_ref_olocked(ref, strong, target_list);
1756 *rdata = ref->data;
1757 if (ret && ref == new_ref) {
1758 /*
1759 * Cleanup the failed reference here as the target
1760 * could now be dead and have already released its
1761 * references by now. Calling on the new reference
1762 * with strong=0 and a tmp_refs will not decrement
1763 * the node. The new_ref gets kfree'd below.
1764 */
1765 binder_cleanup_ref_olocked(new_ref);
1766 ref = NULL;
1767 }
1769 binder_proc_unlock(proc);
1770 if (new_ref && ref != new_ref)
1771 /*
1772 * Another thread created the ref first so
1773 * free the one we allocated
1774 */
1775 kfree(new_ref);
1777 return ret;
1778 }
1779 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1780 struct binder_transaction *t)
1782 BUG_ON(!target_thread);
1783 assert_spin_locked(&target_thread->proc->inner_lock);
1784 BUG_ON(target_thread->transaction_stack != t);
1785 BUG_ON(target_thread->transaction_stack->from != target_thread);
1786 target_thread->transaction_stack =
1787 target_thread->transaction_stack->from_parent;
1788 t->from = NULL;
1789 }
1792 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1793 * @thread: thread to decrement
1795 * A thread needs to be kept alive while being used to create or
1796 * handle a transaction. binder_get_txn_from() is used to safely
1797 * extract t->from from a binder_transaction and keep the thread
1798 * indicated by t->from from being freed. When done with that
1799 * binder_thread, this function is called to decrement the
1800 * tmp_ref and free if appropriate (thread has been released
1801 * and no transaction being processed by the driver)
1803 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1806 * an atomic is used to protect the counter value while
1807 * it cannot reach zero or thread->is_dead is false
1809 binder_inner_proc_lock(thread->proc);
1810 atomic_dec(&thread->tmp_ref);
1811 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1812 binder_inner_proc_unlock(thread->proc);
1813 binder_free_thread(thread);
1814 return;
1815 }
1816 binder_inner_proc_unlock(thread->proc);
1820 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1821 * @proc: proc to decrement
1823 * A binder_proc needs to be kept alive while being used to create or
1824 * handle a transaction. proc->tmp_ref is incremented when
1825 * creating a new transaction or the binder_proc is currently in-use
1826 * by threads that are being released. When done with the binder_proc,
1827 * this function is called to decrement the counter and free the
1828 * proc if appropriate (proc has been released, all threads have
1829 * been released and not currently in use to process a transaction).
1831 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1833 binder_inner_proc_lock(proc);
1834 proc->tmp_ref--;
1835 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1836 !proc->tmp_ref) {
1837 binder_inner_proc_unlock(proc);
1838 binder_free_proc(proc);
1839 return;
1840 }
1841 binder_inner_proc_unlock(proc);
1845 * binder_get_txn_from() - safely extract the "from" thread in transaction
1846 * @t: binder transaction for t->from
1848 * Atomically return the "from" thread and increment the tmp_ref
1849 * count for the thread to ensure it stays alive until
1850 * binder_thread_dec_tmpref() is called.
1852 * Return: the value of t->from
1854 static struct binder_thread *binder_get_txn_from(
1855 struct binder_transaction *t)
1857 struct binder_thread *from;
1859 spin_lock(&t->lock);
1860 from = t->from;
1861 if (from)
1862 atomic_inc(&from->tmp_ref);
1863 spin_unlock(&t->lock);
1864 return from;
1865 }
1868 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1869 * @t: binder transaction for t->from
1871 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1872 * to guarantee that the thread cannot be released while operating on it.
1873 * The caller must call binder_inner_proc_unlock() to release the inner lock
1874 * as well as call binder_thread_dec_tmpref() to release the reference.
1876 * Return: the value of t->from
1878 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1879 struct binder_transaction *t)
1880 __acquires(&t->from->proc->inner_lock)
1882 struct binder_thread *from;
1884 from = binder_get_txn_from(t);
1885 if (!from) {
1886 __acquire(&from->proc->inner_lock);
1887 return NULL;
1888 }
1889 binder_inner_proc_lock(from->proc);
1890 if (t->from) {
1891 BUG_ON(from != t->from);
1892 return from;
1893 }
1894 binder_inner_proc_unlock(from->proc);
1895 __acquire(&from->proc->inner_lock);
1896 binder_thread_dec_tmpref(from);
1897 return NULL;
1898 }
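/*
 * Editor's example (sketch of the contract documented above):
 *
 *	struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
 *
 *	if (from) {
 *		... operate on from under from->proc->inner_lock ...
 *		binder_inner_proc_unlock(from->proc);
 *		binder_thread_dec_tmpref(from);
 *	}
 */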
1901 * binder_free_txn_fixups() - free unprocessed fd fixups
1902 * @t: binder transaction whose fd fixups are to be freed
1904 * If the transaction is being torn down prior to being
1905 * processed by the target process, free all of the
1906 * fd fixups and fput the file structs. It is safe to
1907 * call this function after the fixups have been
1908 * processed -- in that case, the list will be empty.
1910 static void binder_free_txn_fixups(struct binder_transaction *t)
1912 struct binder_txn_fd_fixup *fixup, *tmp;
1914 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1915 fput(fixup->file);
1916 list_del(&fixup->fixup_entry);
1917 kfree(fixup);
1918 }
1919 }
1921 static void binder_free_transaction(struct binder_transaction *t)
1923 struct binder_proc *target_proc = t->to_proc;
1925 if (target_proc) {
1926 binder_inner_proc_lock(target_proc);
1927 if (t->buffer)
1928 t->buffer->transaction = NULL;
1929 binder_inner_proc_unlock(target_proc);
1930 }
1931 /*
1932 * If the transaction has no target_proc, then
1933 * t->buffer->transaction has already been cleared.
1934 */
1935 binder_free_txn_fixups(t);
1936 kfree(t);
1937 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1938 }
1940 static void binder_send_failed_reply(struct binder_transaction *t,
1941 uint32_t error_code)
1943 struct binder_thread *target_thread;
1944 struct binder_transaction *next;
1946 BUG_ON(t->flags & TF_ONE_WAY);
1947 while (1) {
1948 target_thread = binder_get_txn_from_and_acq_inner(t);
1949 if (target_thread) {
1950 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1951 "send failed reply for transaction %d to %d:%d\n",
1952 t->debug_id,
1953 target_thread->proc->pid,
1954 target_thread->pid);
1956 binder_pop_transaction_ilocked(target_thread, t);
1957 if (target_thread->reply_error.cmd == BR_OK) {
1958 target_thread->reply_error.cmd = error_code;
1959 binder_enqueue_thread_work_ilocked(
1960 target_thread,
1961 &target_thread->reply_error.work);
1962 wake_up_interruptible(&target_thread->wait);
1963 } else {
1964 /*
1965 * Cannot get here for normal operation, but
1966 * we can if multiple synchronous transactions
1967 * are sent without blocking for responses.
1968 * Just ignore the 2nd error in this case.
1970 pr_warn("Unexpected reply error: %u\n",
1971 target_thread->reply_error.cmd);
1972 }
1973 binder_inner_proc_unlock(target_thread->proc);
1974 binder_thread_dec_tmpref(target_thread);
1975 binder_free_transaction(t);
1976 return;
1977 }
1978 __release(&target_thread->proc->inner_lock);
1979 next = t->from_parent;
1981 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1982 "send failed reply for transaction %d, target dead\n",
1985 binder_free_transaction(t);
1987 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1988 "reply failed, no target thread at root\n");
1992 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1993 "reply failed, no target thread -- retry %d\n",
1999 * binder_cleanup_transaction() - cleans up undelivered transaction
2000 * @t: transaction that needs to be cleaned up
2001 * @reason: reason the transaction wasn't delivered
2002 * @error_code: error to return to caller (if synchronous call)
2004 static void binder_cleanup_transaction(struct binder_transaction *t,
2006 uint32_t error_code)
2008 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
2009 binder_send_failed_reply(t, error_code);
2010 } else {
2011 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2012 "undelivered transaction %d, %s\n",
2013 t->debug_id, reason);
2014 binder_free_transaction(t);
2015 }
2016 }
2019 * binder_get_object() - gets object and checks for valid metadata
2020 * @proc: binder_proc owning the buffer
2021 * @u: sender's user pointer to base of buffer
2022 * @buffer: binder_buffer that we're parsing.
2023 * @offset: offset in the @buffer at which to validate an object.
2024 * @object: struct binder_object to read into
2026 * Copy the binder object at the given offset into @object. If @u is
2027 * provided then the copy is from the sender's buffer. If not, then
2028 * it is copied from the target's @buffer.
2030 * Return: If there's a valid metadata object at @offset, the
2031 * size of that object. Otherwise, it returns zero. The object
2032 * is read into the struct binder_object pointed to by @object.
2034 static size_t binder_get_object(struct binder_proc *proc,
2035 const void __user *u,
2036 struct binder_buffer *buffer,
2037 unsigned long offset,
2038 struct binder_object *object)
2040 size_t read_size;
2041 struct binder_object_header *hdr;
2042 size_t object_size = 0;
2044 read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
2045 if (offset > buffer->data_size || read_size < sizeof(*hdr))
2046 return 0;
2047 if (u) {
2048 if (copy_from_user(object, u + offset, read_size))
2049 return 0;
2050 } else {
2051 if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
2052 offset, read_size))
2053 return 0;
2054 }
2056 /* Ok, now see if we read a complete object. */
2058 switch (hdr->type) {
2059 case BINDER_TYPE_BINDER:
2060 case BINDER_TYPE_WEAK_BINDER:
2061 case BINDER_TYPE_HANDLE:
2062 case BINDER_TYPE_WEAK_HANDLE:
2063 object_size = sizeof(struct flat_binder_object);
2064 break;
2065 case BINDER_TYPE_FD:
2066 object_size = sizeof(struct binder_fd_object);
2067 break;
2068 case BINDER_TYPE_PTR:
2069 object_size = sizeof(struct binder_buffer_object);
2070 break;
2071 case BINDER_TYPE_FDA:
2072 object_size = sizeof(struct binder_fd_array_object);
2073 break;
2074 default:
2075 return 0;
2076 }
2077 if (offset <= buffer->data_size - object_size &&
2078 buffer->data_size >= object_size)
2079 return object_size;
2080 else
2081 return 0;
2082 }
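/*
 * Editor's note: the bounds check above is deliberately written as two
 * comparisons. With unsigned arithmetic, "offset + object_size <=
 * buffer->data_size" could wrap around for a large offset, and
 * "buffer->data_size - object_size" alone could underflow; requiring
 * "buffer->data_size >= object_size" makes the subtraction safe.
 */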
2085 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2086 * @proc: binder_proc owning the buffer
2087 * @b: binder_buffer containing the object
2088 * @object: struct binder_object to read into
2089 * @index: index in offset array at which the binder_buffer_object is
2090 * located
2091 * @start_offset: points to the start of the offset array
2092 * @object_offsetp: offset of @object read from @b
2093 * @num_valid: the number of valid offsets in the offset array
2095 * Return: If @index is within the valid range of the offset array
2096 * described by @start_offset and @num_valid, and if there's a valid
2097 * binder_buffer_object at the offset found in index @index
2098 * of the offset array, that object is returned. Otherwise,
2099 * %NULL is returned.
2100 * Note that the offset found at index @index itself is not
2101 * verified; this function assumes that @num_valid elements
2102 * from @start_offset were previously verified to have valid offsets.
2103 * If @object_offsetp is non-NULL, then the offset within
2104 * @b is written to it.
2106 static struct binder_buffer_object *binder_validate_ptr(
2107 struct binder_proc *proc,
2108 struct binder_buffer *b,
2109 struct binder_object *object,
2110 binder_size_t index,
2111 binder_size_t start_offset,
2112 binder_size_t *object_offsetp,
2113 binder_size_t num_valid)
2116 binder_size_t object_offset;
2117 unsigned long buffer_offset;
2119 if (index >= num_valid)
2122 buffer_offset = start_offset + sizeof(binder_size_t) * index;
2123 if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2125 sizeof(object_offset)))
2127 object_size = binder_get_object(proc, NULL, b, object_offset, object);
2128 if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
2131 *object_offsetp = object_offset;
2133 return &object->bbo;
2137 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2138 * @proc: binder_proc owning the buffer
2139 * @b: transaction buffer
2140 * @objects_start_offset: offset to start of objects buffer
2141 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up
2142 * @fixup_offset: start offset in @buffer to fix up
2143 * @last_obj_offset: offset to last binder_buffer_object that we fixed
2144 * @last_min_offset: minimum fixup offset in object at @last_obj_offset
2146 * Return: %true if a fixup in buffer @b at offset @fixup_offset is
2147 * allowed.
2149 * For safety reasons, we only allow fixups inside a buffer to happen
2150 * at increasing offsets; additionally, we only allow fixup on the last
2151 * buffer object that was verified, or one of its parents.
2153 * Example of what is allowed:
2154 *
2155 * A
2156 * B (parent = A, offset = 0)
2157 * C (parent = A, offset = 16)
2158 * D (parent = C, offset = 0)
2159 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2161 * Examples of what is not allowed:
2163 * Decreasing offsets within the same parent:
2164 * A
2165 * C (parent = A, offset = 16)
2166 * B (parent = A, offset = 0) // decreasing offset within A
2168 * Referring to a parent that wasn't the last object or any of its parents:
2169 * A
2170 * B (parent = A, offset = 0)
2171 * C (parent = A, offset = 0)
2172 * C (parent = A, offset = 16)
2173 * D (parent = B, offset = 0) // B is not A or any of A's parents
2175 static bool binder_validate_fixup(struct binder_proc *proc,
2176 struct binder_buffer *b,
2177 binder_size_t objects_start_offset,
2178 binder_size_t buffer_obj_offset,
2179 binder_size_t fixup_offset,
2180 binder_size_t last_obj_offset,
2181 binder_size_t last_min_offset)
2183 if (!last_obj_offset) {
2184 /* No buffer object has been verified yet, so there is nothing to fix up in */
2185 return false;
2186 }
2188 while (last_obj_offset != buffer_obj_offset) {
2189 unsigned long buffer_offset;
2190 struct binder_object last_object;
2191 struct binder_buffer_object *last_bbo;
2192 size_t object_size = binder_get_object(proc, NULL, b,
2195 if (object_size != sizeof(*last_bbo))
2198 last_bbo = &last_object.bbo;
2200 * Safe to retrieve the parent of last_obj, since it
2201 * was already previously verified by the driver.
2203 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2205 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
2206 buffer_offset = objects_start_offset +
2207 sizeof(binder_size_t) * last_bbo->parent;
2208 if (binder_alloc_copy_from_buffer(&proc->alloc,
2209 &last_obj_offset,
2210 b, buffer_offset,
2211 sizeof(last_obj_offset)))
2212 return false;
2213 }
2214 return (fixup_offset >= last_min_offset);
2215 }
2216
2217 /**
2218 * struct binder_task_work_cb - for deferred close
2220 * @twork: callback_head for task work
2221 * @file: file to close
2223 * Structure to pass task work to be handled after
2224 * returning from binder_ioctl() via task_work_add().
2226 struct binder_task_work_cb {
2227 struct callback_head twork;
2228 struct file *file;
2229 };
2232 * binder_do_fd_close() - close list of file descriptors
2233 * @twork: callback head for task work
2235 * It is not safe to call ksys_close() during the binder_ioctl()
2236 * function if there is a chance that binder's own file descriptor
2237 * might be closed. This is to meet the requirements for using
2238 * fdget() (see comments for __fget_light()). Therefore use
2239 * task_work_add() to schedule the close operation once we have
2240 * returned from binder_ioctl(). This function is a callback
2241 * for that mechanism and does the actual ksys_close() on the
2242 * given file descriptor.
2244 static void binder_do_fd_close(struct callback_head *twork)
2246 struct binder_task_work_cb *twcb = container_of(twork,
2247 struct binder_task_work_cb, twork);
2254 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2255 * @fd: file-descriptor to close
2257 * See comments in binder_do_fd_close(). This function is used to schedule
2258 * a file-descriptor to be closed after returning from binder_ioctl().
2260 static void binder_deferred_fd_close(int fd)
2262 struct binder_task_work_cb *twcb;
2264 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2265 if (!twcb)
2266 return;
2267 init_task_work(&twcb->twork, binder_do_fd_close);
2268 close_fd_get_file(fd, &twcb->file);
2269 if (twcb->file) {
2270 filp_close(twcb->file, current->files);
2271 task_work_add(current, &twcb->twork, TWA_RESUME);
2272 } else {
2273 kfree(twcb);
2274 }
2275 }
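/*
 * The same deferred-work pattern in miniature, as a sketch with
 * hypothetical names (my_cb/my_work_fn); not additional driver code:
 *
 *	struct my_cb {
 *		struct callback_head twork;
 *	};
 *
 *	static void my_work_fn(struct callback_head *head)
 *	{
 *		struct my_cb *cb = container_of(head, struct my_cb, twork);
 *		// runs once the task is returning to user mode,
 *		// safely outside binder_ioctl()
 *		kfree(cb);
 *	}
 *
 *	init_task_work(&cb->twork, my_work_fn);
 *	task_work_add(current, &cb->twork, TWA_RESUME);
 *
 * TWA_RESUME defers the callback to the return-to-userspace path, which
 * is what makes the fd close above safe.
 */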
2277 static void binder_transaction_buffer_release(struct binder_proc *proc,
2278 struct binder_thread *thread,
2279 struct binder_buffer *buffer,
2280 binder_size_t off_end_offset,
2283 int debug_id = buffer->debug_id;
2284 binder_size_t off_start_offset, buffer_offset;
2286 binder_debug(BINDER_DEBUG_TRANSACTION,
2287 "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2288 proc->pid, buffer->debug_id,
2289 buffer->data_size, buffer->offsets_size,
2290 (unsigned long long)off_end_offset);
2292 if (buffer->target_node)
2293 binder_dec_node(buffer->target_node, 1, 0);
2295 off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2297 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2298 buffer_offset += sizeof(binder_size_t)) {
2299 struct binder_object_header *hdr;
2300 size_t object_size = 0;
2301 struct binder_object object;
2302 binder_size_t object_offset;
2304 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2305 buffer, buffer_offset,
2306 sizeof(object_offset)))
2307 object_size = binder_get_object(proc, NULL, buffer,
2308 object_offset, &object);
2309 if (object_size == 0) {
2310 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2311 debug_id, (u64)object_offset, buffer->data_size);
2315 switch (hdr->type) {
2316 case BINDER_TYPE_BINDER:
2317 case BINDER_TYPE_WEAK_BINDER: {
2318 struct flat_binder_object *fp;
2319 struct binder_node *node;
2321 fp = to_flat_binder_object(hdr);
2322 node = binder_get_node(proc, fp->binder);
2324 pr_err("transaction release %d bad node %016llx\n",
2325 debug_id, (u64)fp->binder);
2328 binder_debug(BINDER_DEBUG_TRANSACTION,
2329 " node %d u%016llx\n",
2330 node->debug_id, (u64)node->ptr);
2331 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2333 binder_put_node(node);
2335 case BINDER_TYPE_HANDLE:
2336 case BINDER_TYPE_WEAK_HANDLE: {
2337 struct flat_binder_object *fp;
2338 struct binder_ref_data rdata;
2341 fp = to_flat_binder_object(hdr);
2342 ret = binder_dec_ref_for_handle(proc, fp->handle,
2343 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2346 pr_err("transaction release %d bad handle %d, ret = %d\n",
2347 debug_id, fp->handle, ret);
2350 binder_debug(BINDER_DEBUG_TRANSACTION,
2351 " ref %d desc %d\n",
2352 rdata.debug_id, rdata.desc);
2355 case BINDER_TYPE_FD: {
2357 * No need to close the file here since user-space
2358 * closes it for successfully delivered
2359 * transactions. For transactions that weren't
2360 * delivered, the new fd was never allocated so
2361 * there is no need to close and the fput on the
2362 * file is done when the transaction is torn
2363 * down.
2364 */
2365 break;
2366 case BINDER_TYPE_PTR:
2368 * Nothing to do here, this will get cleaned up when the
2369 * transaction buffer gets freed
2372 case BINDER_TYPE_FDA: {
2373 struct binder_fd_array_object *fda;
2374 struct binder_buffer_object *parent;
2375 struct binder_object ptr_object;
2376 binder_size_t fda_offset;
2378 binder_size_t fd_buf_size;
2379 binder_size_t num_valid;
2383 * The fd fixups have not been applied so no
2384 * fds need to be closed.
2389 num_valid = (buffer_offset - off_start_offset) /
2390 sizeof(binder_size_t);
2391 fda = to_binder_fd_array_object(hdr);
2392 parent = binder_validate_ptr(proc, buffer, &ptr_object,
2398 pr_err("transaction release %d bad parent offset\n",
2402 fd_buf_size = sizeof(u32) * fda->num_fds;
2403 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2404 pr_err("transaction release %d invalid number of fds (%lld)\n",
2405 debug_id, (u64)fda->num_fds);
2408 if (fd_buf_size > parent->length ||
2409 fda->parent_offset > parent->length - fd_buf_size) {
2410 /* No space for all file descriptors here. */
2411 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2412 debug_id, (u64)fda->num_fds);
2415 /*
2416 * the source data for binder_buffer_object is visible
2417 * to user-space and the @buffer element is the user
2418 * pointer to the buffer_object containing the fd_array.
2419 * Convert the address to an offset relative to
2420 * the base of the transaction buffer.
2421 */
2422 fda_offset =
2423 (parent->buffer - (uintptr_t)buffer->user_data) +
2424 fda->parent_offset;
2425 for (fd_index = 0; fd_index < fda->num_fds;
2426 fd_index++) {
2427 u32 fd;
2428 int err;
2429 binder_size_t offset = fda_offset +
2430 fd_index * sizeof(fd);
2432 err = binder_alloc_copy_from_buffer(
2433 &proc->alloc, &fd, buffer,
2434 offset, sizeof(fd));
2437 binder_deferred_fd_close(fd);
2438 /*
2439 * Need to make sure the thread goes
2440 * back to userspace to complete the
2441 * deferred close
2442 */
2443 if (thread)
2444 thread->looper_need_return = true;
2449 pr_err("transaction release %d bad object type %x\n",
2450 debug_id, hdr->type);
2456 /* Clean up all the objects in the buffer */
2457 static inline void binder_release_entire_buffer(struct binder_proc *proc,
2458 struct binder_thread *thread,
2459 struct binder_buffer *buffer,
2462 binder_size_t off_end_offset;
2464 off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
2465 off_end_offset += buffer->offsets_size;
2467 binder_transaction_buffer_release(proc, thread, buffer,
2468 off_end_offset, is_failure);
2471 static int binder_translate_binder(struct flat_binder_object *fp,
2472 struct binder_transaction *t,
2473 struct binder_thread *thread)
2475 struct binder_node *node;
2476 struct binder_proc *proc = thread->proc;
2477 struct binder_proc *target_proc = t->to_proc;
2478 struct binder_ref_data rdata;
2481 node = binder_get_node(proc, fp->binder);
2483 node = binder_new_node(proc, fp);
2487 if (fp->cookie != node->cookie) {
2488 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2489 proc->pid, thread->pid, (u64)fp->binder,
2490 node->debug_id, (u64)fp->cookie,
2495 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2500 ret = binder_inc_ref_for_node(target_proc, node,
2501 fp->hdr.type == BINDER_TYPE_BINDER,
2502 &thread->todo, &rdata);
2506 if (fp->hdr.type == BINDER_TYPE_BINDER)
2507 fp->hdr.type = BINDER_TYPE_HANDLE;
2509 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2511 fp->handle = rdata.desc;
2514 trace_binder_transaction_node_to_ref(t, node, &rdata);
2515 binder_debug(BINDER_DEBUG_TRANSACTION,
2516 " node %d u%016llx -> ref %d desc %d\n",
2517 node->debug_id, (u64)node->ptr,
2518 rdata.debug_id, rdata.desc);
2519 done:
2520 binder_put_node(node);
2521 return ret;
2522 }
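/*
 * Effect of binder_translate_binder() on a hypothetical object, shown
 * as a sketch. The sender transmits its local addresses:
 *
 *	fp->hdr.type == BINDER_TYPE_BINDER
 *	fp->binder   == <sender's object pointer>
 *	fp->cookie   == <sender's cookie>
 *
 * and the target's copy of the buffer ends up with:
 *
 *	fp->hdr.type == BINDER_TYPE_HANDLE
 *	fp->handle   == rdata.desc	// a ref descriptor in target_proc
 *
 * so raw sender pointers are never exposed to the target process.
 */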
2524 static int binder_translate_handle(struct flat_binder_object *fp,
2525 struct binder_transaction *t,
2526 struct binder_thread *thread)
2528 struct binder_proc *proc = thread->proc;
2529 struct binder_proc *target_proc = t->to_proc;
2530 struct binder_node *node;
2531 struct binder_ref_data src_rdata;
2534 node = binder_get_node_from_ref(proc, fp->handle,
2535 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2537 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2538 proc->pid, thread->pid, fp->handle);
2541 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2546 binder_node_lock(node);
2547 if (node->proc == target_proc) {
2548 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2549 fp->hdr.type = BINDER_TYPE_BINDER;
2551 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2552 fp->binder = node->ptr;
2553 fp->cookie = node->cookie;
2555 binder_inner_proc_lock(node->proc);
2557 __acquire(&node->proc->inner_lock);
2558 binder_inc_node_nilocked(node,
2559 fp->hdr.type == BINDER_TYPE_BINDER,
2562 binder_inner_proc_unlock(node->proc);
2564 __release(&node->proc->inner_lock);
2565 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2566 binder_debug(BINDER_DEBUG_TRANSACTION,
2567 " ref %d desc %d -> node %d u%016llx\n",
2568 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2570 binder_node_unlock(node);
2572 struct binder_ref_data dest_rdata;
2574 binder_node_unlock(node);
2575 ret = binder_inc_ref_for_node(target_proc, node,
2576 fp->hdr.type == BINDER_TYPE_HANDLE,
2582 fp->handle = dest_rdata.desc;
2584 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2586 binder_debug(BINDER_DEBUG_TRANSACTION,
2587 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2588 src_rdata.debug_id, src_rdata.desc,
2589 dest_rdata.debug_id, dest_rdata.desc,
2593 binder_put_node(node);
2597 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2598 struct binder_transaction *t,
2599 struct binder_thread *thread,
2600 struct binder_transaction *in_reply_to)
2602 struct binder_proc *proc = thread->proc;
2603 struct binder_proc *target_proc = t->to_proc;
2604 struct binder_txn_fd_fixup *fixup;
2607 bool target_allows_fd;
2609 if (in_reply_to)
2610 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2611 else
2612 target_allows_fd = t->buffer->target_node->accept_fds;
2613 if (!target_allows_fd) {
2614 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2615 proc->pid, thread->pid,
2616 in_reply_to ? "reply" : "transaction",
2619 goto err_fd_not_accepted;
2624 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2625 proc->pid, thread->pid, fd);
2629 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2635 /*
2636 * Add fixup record for this transaction. The allocation
2637 * of the fd in the target needs to be done from a
2638 * target thread.
2639 */
2640 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2641 if (!fixup) {
2642 ret = -ENOMEM;
2643 goto err_alloc;
2644 }
2645 fixup->file = file;
2646 fixup->offset = fd_offset;
2647 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2648 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2649
2650 return ret;
2651
2652 err_alloc:
2653 err_security:
2654 fput(file);
2655 err_fget:
2656 err_fd_not_accepted:
2657 return ret;
2658 }
2661 * struct binder_ptr_fixup - data to be fixed-up in target buffer
2662 * @offset: offset in target buffer to fixup
2663 * @skip_size: bytes to skip in copy (fixup will be written later)
2664 * @fixup_data: data to write at fixup offset
2665 * @node: list node
2667 * This is used for the pointer fixup list (pf) which is created and consumed
2668 * during binder_transaction() and is only accessed locally. No
2669 * locking is necessary.
2671 * The list is ordered by @offset.
2673 struct binder_ptr_fixup {
2674 binder_size_t offset;
2676 binder_uintptr_t fixup_data;
2677 struct list_head node;
2681 * struct binder_sg_copy - scatter-gather data to be copied
2682 * @offset: offset in target buffer
2683 * @sender_uaddr: user address in source buffer
2684 * @length: bytes to copy
2685 * @node: list node
2687 * This is used for the sg copy list (sgc) which is created and consumed
2688 * during binder_transaction() and is only accessed locally. No
2689 * locking is necessary.
2691 * The list is ordered by @offset.
2693 struct binder_sg_copy {
2694 binder_size_t offset;
2695 const void __user *sender_uaddr;
2697 struct list_head node;
2701 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2702 * @alloc: binder_alloc associated with @buffer
2703 * @buffer: binder buffer in target process
2704 * @sgc_head: list_head of scatter-gather copy list
2705 * @pf_head: list_head of pointer fixup list
2707 * Processes all elements of @sgc_head, applying fixups from @pf_head
2708 * and copying the scatter-gather data from the source process' user
2709 * buffer to the target's buffer. It is expected that the list creation
2710 * and processing all occurs during binder_transaction() so these lists
2711 * are only accessed in local context.
2713 * Return: 0=success, else -errno
2715 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2716 struct binder_buffer *buffer,
2717 struct list_head *sgc_head,
2718 struct list_head *pf_head)
2721 struct binder_sg_copy *sgc, *tmpsgc;
2722 struct binder_ptr_fixup *tmppf;
2723 struct binder_ptr_fixup *pf =
2724 list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2727 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2728 size_t bytes_copied = 0;
2730 while (bytes_copied < sgc->length) {
2732 size_t bytes_left = sgc->length - bytes_copied;
2733 size_t offset = sgc->offset + bytes_copied;
2735 /*
2736 * We copy up to the fixup (pointed to by pf)
2737 */
2738 copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2739 : bytes_left;
2740 if (!ret && copy_size)
2741 ret = binder_alloc_copy_user_to_buffer(
2744 sgc->sender_uaddr + bytes_copied,
2746 bytes_copied += copy_size;
2747 if (copy_size != bytes_left) {
2749 /* we stopped at a fixup offset */
2750 if (pf->skip_size) {
2752 * we are just skipping. This is for
2753 * BINDER_TYPE_FDA where the translated
2754 * fds will be fixed up when we get
2755 * to target context.
2757 bytes_copied += pf->skip_size;
2759 /* apply the fixup indicated by pf */
2761 ret = binder_alloc_copy_to_buffer(
2765 sizeof(pf->fixup_data));
2766 bytes_copied += sizeof(pf->fixup_data);
2768 list_del(&pf->node);
2770 pf = list_first_entry_or_null(pf_head,
2771 struct binder_ptr_fixup, node);
2774 list_del(&sgc->node);
2777 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2778 BUG_ON(pf->skip_size == 0);
2779 list_del(&pf->node);
2782 BUG_ON(!list_empty(sgc_head));
2784 return ret > 0 ? -EINVAL : ret;
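/*
 * Worked example of the copy/fixup interleaving above, with made-up
 * offsets: one sg block (sgc->offset = 0, sgc->length = 64) and one
 * fixup at pf->offset = 16 with pf->skip_size = 0 results in
 *
 *	copy bytes [0, 16) from sgc->sender_uaddr	(up to the fixup)
 *	write 8 bytes of pf->fixup_data at offset 16	(the fixup itself)
 *	copy bytes [24, 64) from the sender		(the remainder)
 *
 * With pf->skip_size = 8 instead (the BINDER_TYPE_FDA case), the 8
 * bytes are skipped here and patched later in target context.
 */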
2788 * binder_cleanup_deferred_txn_lists() - free specified lists
2789 * @sgc_head: list_head of scatter-gather copy list
2790 * @pf_head: list_head of pointer fixup list
2792 * Called to clean up @sgc_head and @pf_head if there is an
2793 * error.
2795 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2796 struct list_head *pf_head)
2798 struct binder_sg_copy *sgc, *tmpsgc;
2799 struct binder_ptr_fixup *pf, *tmppf;
2801 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2802 list_del(&sgc->node);
2805 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2806 list_del(&pf->node);
2812 * binder_defer_copy() - queue a scatter-gather buffer for copy
2813 * @sgc_head: list_head of scatter-gather copy list
2814 * @offset: binder buffer offset in target process
2815 * @sender_uaddr: user address in source process
2816 * @length: bytes to copy
2818 * Specify a scatter-gather block to be copied. The actual copy must
2819 * be deferred until all the needed fixups are identified and queued.
2820 * Then the copy and fixups are done together so un-translated values
2821 * from the source are never visible in the target buffer.
2823 * We are guaranteed that repeated calls to this function will have
2824 * monotonically increasing @offset values so the list will naturally
2825 * be ordered.
2827 * Return: 0=success, else -errno
2829 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2830 const void __user *sender_uaddr, size_t length)
2832 struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2837 bc->offset = offset;
2838 bc->sender_uaddr = sender_uaddr;
2839 bc->length = length;
2840 INIT_LIST_HEAD(&bc->node);
2843 * We are guaranteed that the deferred copies are in-order
2844 * so just add to the tail.
2846 list_add_tail(&bc->node, sgc_head);
2852 * binder_add_fixup() - queue a fixup to be applied to sg copy
2853 * @pf_head: list_head of binder ptr fixup list
2854 * @offset: binder buffer offset in target process
2855 * @fixup: bytes to be copied for fixup
2856 * @skip_size: bytes to skip when copying (fixup will be applied later)
2858 * Add the specified fixup to a list ordered by @offset. When copying
2859 * the scatter-gather buffers, the fixup will be copied instead of
2860 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2861 * will be applied later (in target process context), so we just skip
2862 * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2863 * fixup data specified by @fixup.
2864 *
2865 * This function is called *mostly* in @offset order, but there are
2866 * exceptions. Since out-of-order inserts are relatively uncommon,
2867 * we insert the new element by searching backward from the tail of
2868 * the list.
2869 *
2870 * Return: 0=success, else -errno
2872 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2873 binder_uintptr_t fixup, size_t skip_size)
2875 struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2876 struct binder_ptr_fixup *tmppf;
2881 pf->offset = offset;
2882 pf->fixup_data = fixup;
2883 pf->skip_size = skip_size;
2884 INIT_LIST_HEAD(&pf->node);
2886 /* Fixups are *mostly* added in-order, but there are some
2887 * exceptions. Look backwards through list for insertion point.
2889 list_for_each_entry_reverse(tmppf, pf_head, node) {
2890 if (tmppf->offset < pf->offset) {
2891 list_add(&pf->node, &tmppf->node);
2896 * if we get here, then the new offset is the lowest so
2897 * insert at the head
2899 list_add(&pf->node, pf_head);
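/*
 * Insertion example for binder_add_fixup(), with made-up offsets:
 * after fixups at offsets 8 and 24, a late fixup at offset 16 walks
 * backward from the tail, passes 24, stops at 8 and is linked after
 * it, so
 *
 *	8 -> 24		becomes		8 -> 16 -> 24
 *
 * keeping the list in the @offset order that
 * binder_do_deferred_txn_copies() consumes front-to-back.
 */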
2903 static int binder_translate_fd_array(struct list_head *pf_head,
2904 struct binder_fd_array_object *fda,
2905 const void __user *sender_ubuffer,
2906 struct binder_buffer_object *parent,
2907 struct binder_buffer_object *sender_uparent,
2908 struct binder_transaction *t,
2909 struct binder_thread *thread,
2910 struct binder_transaction *in_reply_to)
2912 binder_size_t fdi, fd_buf_size;
2913 binder_size_t fda_offset;
2914 const void __user *sender_ufda_base;
2915 struct binder_proc *proc = thread->proc;
2918 if (fda->num_fds == 0)
2921 fd_buf_size = sizeof(u32) * fda->num_fds;
2922 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2923 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2924 proc->pid, thread->pid, (u64)fda->num_fds);
2927 if (fd_buf_size > parent->length ||
2928 fda->parent_offset > parent->length - fd_buf_size) {
2929 /* No space for all file descriptors here. */
2930 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2931 proc->pid, thread->pid, (u64)fda->num_fds);
2935 * the source data for binder_buffer_object is visible
2936 * to user-space and the @buffer element is the user
2937 * pointer to the buffer_object containing the fd_array.
2938 * Convert the address to an offset relative to
2939 * the base of the transaction buffer.
2940 */
2941 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2942 fda->parent_offset;
2943 sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2944 fda->parent_offset;
2946 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2947 !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2948 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2949 proc->pid, thread->pid);
2952 ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2956 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2958 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2959 binder_size_t sender_uoffset = fdi * sizeof(fd);
2961 ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2963 ret = binder_translate_fd(fd, offset, t, thread,
2966 return ret > 0 ? -EINVAL : ret;
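/*
 * The fda_offset arithmetic above with hypothetical numbers: if the
 * parent buffer object was placed 0x100 bytes into the transaction
 * buffer (parent->buffer == (uintptr_t)t->buffer->user_data + 0x100)
 * and fda->parent_offset == 0x20, then
 *
 *	fda_offset = 0x100 + 0x20 = 0x120
 *
 * i.e. the fd array starts 0x120 bytes into the target buffer, and
 * fd i lives at fda_offset + i * sizeof(u32).
 */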
2971 static int binder_fixup_parent(struct list_head *pf_head,
2972 struct binder_transaction *t,
2973 struct binder_thread *thread,
2974 struct binder_buffer_object *bp,
2975 binder_size_t off_start_offset,
2976 binder_size_t num_valid,
2977 binder_size_t last_fixup_obj_off,
2978 binder_size_t last_fixup_min_off)
2980 struct binder_buffer_object *parent;
2981 struct binder_buffer *b = t->buffer;
2982 struct binder_proc *proc = thread->proc;
2983 struct binder_proc *target_proc = t->to_proc;
2984 struct binder_object object;
2985 binder_size_t buffer_offset;
2986 binder_size_t parent_offset;
2988 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2991 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2992 off_start_offset, &parent_offset,
2995 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2996 proc->pid, thread->pid);
3000 if (!binder_validate_fixup(target_proc, b, off_start_offset,
3001 parent_offset, bp->parent_offset,
3003 last_fixup_min_off)) {
3004 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3005 proc->pid, thread->pid);
3009 if (parent->length < sizeof(binder_uintptr_t) ||
3010 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
3011 /* No space for a pointer here! */
3012 binder_user_error("%d:%d got transaction with invalid parent offset\n",
3013 proc->pid, thread->pid);
3016 buffer_offset = bp->parent_offset +
3017 (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
3018 return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
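/*
 * What the queued fixup does, on a hypothetical layout: if the parent
 * buffer object occupies [0x40, 0x80) of the transaction buffer and
 * bp->parent_offset == 0x8, then the pointer slot at buffer offset
 * 0x48 is overwritten with bp->buffer -- the child buffer's address in
 * the *target's* address space -- so the parent's embedded pointer is
 * meaningful to the receiver rather than to the sender.
 */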
3022 * binder_proc_transaction() - sends a transaction to a process and wakes it up
3023 * @t: transaction to send
3024 * @proc: process to send the transaction to
3025 * @thread: thread in @proc to send the transaction to (may be NULL)
3027 * This function queues a transaction to the specified process. It will try
3028 * to find a thread in the target process to handle the transaction and
3029 * wake it up. If no thread is found, the work is queued to the proc
3030 * waitqueue.
3031 *
3032 * If the @thread parameter is not NULL, the transaction is always queued
3033 * to the waitlist of that specific thread.
3035 * Return: true if the transaction was successfully queued
3036 * false if the target process or thread is dead
3038 static bool binder_proc_transaction(struct binder_transaction *t,
3039 struct binder_proc *proc,
3040 struct binder_thread *thread)
3042 struct binder_node *node = t->buffer->target_node;
3043 bool oneway = !!(t->flags & TF_ONE_WAY);
3044 bool pending_async = false;
3047 binder_node_lock(node);
3048 if (oneway) {
3049 BUG_ON(thread);
3050 if (node->has_async_transaction)
3051 pending_async = true;
3052 else
3053 node->has_async_transaction = true;
3054 }
3056 binder_inner_proc_lock(proc);
3058 if (proc->is_dead || (thread && thread->is_dead)) {
3059 binder_inner_proc_unlock(proc);
3060 binder_node_unlock(node);
3061 return false;
3062 }
3063
3064 if (!thread && !pending_async)
3065 thread = binder_select_thread_ilocked(proc);
3066
3067 if (thread)
3068 binder_enqueue_thread_work_ilocked(thread, &t->work);
3069 else if (!pending_async)
3070 binder_enqueue_work_ilocked(&t->work, &proc->todo);
3071 else
3072 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
3073
3074 if (!pending_async)
3075 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
3077 binder_inner_proc_unlock(proc);
3078 binder_node_unlock(node);
3079
3080 return true;
3081 }
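/*
 * The queueing decision above, summarized (assuming the reconstructed
 * branches are faithful to the original logic):
 *
 *	thread != NULL		-> that thread's todo list
 *	sync, no thread given	-> a waiting thread, else proc->todo
 *	oneway, node idle	-> as above, node marked has_async_transaction
 *	oneway, node busy	-> node->async_todo, delivered only when
 *				   the previous async buffer is freed
 *
 * Only the non-pending cases issue a wakeup here.
 */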
3084 * binder_get_node_refs_for_txn() - Get required refs on node for txn
3085 * @node: struct binder_node for which to get refs
3086 * @procp: returns @node->proc if valid
3087 * @error: if no @procp then returns BR_DEAD_REPLY
3089 * User-space normally keeps the node alive when creating a transaction
3090 * since it has a reference to the target. The local strong ref keeps it
3091 * alive if the sending process dies before the target process processes
3092 * the transaction. If the source process is malicious or has a reference
3093 * counting bug, relying on the local strong ref can fail.
3095 * Since user-space can cause the local strong ref to go away, we also take
3096 * a tmpref on the node to ensure it survives while we are constructing
3097 * the transaction. We also need a tmpref on the proc while we are
3098 * constructing the transaction, so we take that here as well.
3100 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
3101 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
3102 * target proc has died, @error is set to BR_DEAD_REPLY.
3104 static struct binder_node *binder_get_node_refs_for_txn(
3105 struct binder_node *node,
3106 struct binder_proc **procp,
3109 struct binder_node *target_node = NULL;
3111 binder_node_inner_lock(node);
3112 if (node->proc) {
3113 target_node = node;
3114 binder_inc_node_nilocked(node, 1, 0, NULL);
3115 binder_inc_node_tmpref_ilocked(node);
3116 node->proc->tmp_ref++;
3117 *procp = node->proc;
3118 } else
3119 *error = BR_DEAD_REPLY;
3120 binder_node_inner_unlock(node);
3121
3122 return target_node;
3123 }
3125 static void binder_transaction(struct binder_proc *proc,
3126 struct binder_thread *thread,
3127 struct binder_transaction_data *tr, int reply,
3128 binder_size_t extra_buffers_size)
3131 struct binder_transaction *t;
3132 struct binder_work *w;
3133 struct binder_work *tcomplete;
3134 binder_size_t buffer_offset = 0;
3135 binder_size_t off_start_offset, off_end_offset;
3136 binder_size_t off_min;
3137 binder_size_t sg_buf_offset, sg_buf_end_offset;
3138 binder_size_t user_offset = 0;
3139 struct binder_proc *target_proc = NULL;
3140 struct binder_thread *target_thread = NULL;
3141 struct binder_node *target_node = NULL;
3142 struct binder_transaction *in_reply_to = NULL;
3143 struct binder_transaction_log_entry *e;
3144 uint32_t return_error = 0;
3145 uint32_t return_error_param = 0;
3146 uint32_t return_error_line = 0;
3147 binder_size_t last_fixup_obj_off = 0;
3148 binder_size_t last_fixup_min_off = 0;
3149 struct binder_context *context = proc->context;
3150 int t_debug_id = atomic_inc_return(&binder_last_id);
3151 char *secctx = NULL;
3153 struct list_head sgc_head;
3154 struct list_head pf_head;
3155 const void __user *user_buffer = (const void __user *)
3156 (uintptr_t)tr->data.ptr.buffer;
3157 INIT_LIST_HEAD(&sgc_head);
3158 INIT_LIST_HEAD(&pf_head);
3160 e = binder_transaction_log_add(&binder_transaction_log);
3161 e->debug_id = t_debug_id;
3162 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
3163 e->from_proc = proc->pid;
3164 e->from_thread = thread->pid;
3165 e->target_handle = tr->target.handle;
3166 e->data_size = tr->data_size;
3167 e->offsets_size = tr->offsets_size;
3168 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
3171 binder_inner_proc_lock(proc);
3172 in_reply_to = thread->transaction_stack;
3173 if (in_reply_to == NULL) {
3174 binder_inner_proc_unlock(proc);
3175 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
3176 proc->pid, thread->pid);
3177 return_error = BR_FAILED_REPLY;
3178 return_error_param = -EPROTO;
3179 return_error_line = __LINE__;
3180 goto err_empty_call_stack;
3182 if (in_reply_to->to_thread != thread) {
3183 spin_lock(&in_reply_to->lock);
3184 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
3185 proc->pid, thread->pid, in_reply_to->debug_id,
3186 in_reply_to->to_proc ?
3187 in_reply_to->to_proc->pid : 0,
3188 in_reply_to->to_thread ?
3189 in_reply_to->to_thread->pid : 0);
3190 spin_unlock(&in_reply_to->lock);
3191 binder_inner_proc_unlock(proc);
3192 return_error = BR_FAILED_REPLY;
3193 return_error_param = -EPROTO;
3194 return_error_line = __LINE__;
3196 goto err_bad_call_stack;
3198 thread->transaction_stack = in_reply_to->to_parent;
3199 binder_inner_proc_unlock(proc);
3200 binder_set_nice(in_reply_to->saved_priority);
3201 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
3202 if (target_thread == NULL) {
3203 /* annotation for sparse */
3204 __release(&target_thread->proc->inner_lock);
3205 return_error = BR_DEAD_REPLY;
3206 return_error_line = __LINE__;
3207 goto err_dead_binder;
3209 if (target_thread->transaction_stack != in_reply_to) {
3210 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
3211 proc->pid, thread->pid,
3212 target_thread->transaction_stack ?
3213 target_thread->transaction_stack->debug_id : 0,
3214 in_reply_to->debug_id);
3215 binder_inner_proc_unlock(target_thread->proc);
3216 return_error = BR_FAILED_REPLY;
3217 return_error_param = -EPROTO;
3218 return_error_line = __LINE__;
3220 target_thread = NULL;
3221 goto err_dead_binder;
3223 target_proc = target_thread->proc;
3224 target_proc->tmp_ref++;
3225 binder_inner_proc_unlock(target_thread->proc);
3227 if (tr->target.handle) {
3228 struct binder_ref *ref;
3231 * There must already be a strong ref
3232 * on this node. If so, do a strong
3233 * increment on the node to ensure it
3234 * stays alive until the transaction is
3237 binder_proc_lock(proc);
3238 ref = binder_get_ref_olocked(proc, tr->target.handle,
3241 target_node = binder_get_node_refs_for_txn(
3242 ref->node, &target_proc,
3245 binder_user_error("%d:%d got transaction to invalid handle\n",
3246 proc->pid, thread->pid);
3247 return_error = BR_FAILED_REPLY;
3249 binder_proc_unlock(proc);
3251 mutex_lock(&context->context_mgr_node_lock);
3252 target_node = context->binder_context_mgr_node;
3254 target_node = binder_get_node_refs_for_txn(
3255 target_node, &target_proc,
3258 return_error = BR_DEAD_REPLY;
3259 mutex_unlock(&context->context_mgr_node_lock);
3260 if (target_node && target_proc->pid == proc->pid) {
3261 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3262 proc->pid, thread->pid);
3263 return_error = BR_FAILED_REPLY;
3264 return_error_param = -EINVAL;
3265 return_error_line = __LINE__;
3266 goto err_invalid_target_handle;
3271 * return_error is set above
3273 return_error_param = -EINVAL;
3274 return_error_line = __LINE__;
3275 goto err_dead_binder;
3277 e->to_node = target_node->debug_id;
3278 if (WARN_ON(proc == target_proc)) {
3279 return_error = BR_FAILED_REPLY;
3280 return_error_param = -EINVAL;
3281 return_error_line = __LINE__;
3282 goto err_invalid_target_handle;
3284 if (security_binder_transaction(proc->cred,
3285 target_proc->cred) < 0) {
3286 return_error = BR_FAILED_REPLY;
3287 return_error_param = -EPERM;
3288 return_error_line = __LINE__;
3289 goto err_invalid_target_handle;
3291 binder_inner_proc_lock(proc);
3293 w = list_first_entry_or_null(&thread->todo,
3294 struct binder_work, entry);
3295 if (!(tr->flags & TF_ONE_WAY) && w &&
3296 w->type == BINDER_WORK_TRANSACTION) {
3298 * Do not allow new outgoing transaction from a
3299 * thread that has a transaction at the head of
3300 * its todo list. Only need to check the head
3301 * because binder_select_thread_ilocked picks a
3302 * thread from proc->waiting_threads to enqueue
3303 * the transaction, and nothing is queued to the
3304 * todo list while the thread is on waiting_threads.
3306 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3307 proc->pid, thread->pid);
3308 binder_inner_proc_unlock(proc);
3309 return_error = BR_FAILED_REPLY;
3310 return_error_param = -EPROTO;
3311 return_error_line = __LINE__;
3312 goto err_bad_todo_list;
3315 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3316 struct binder_transaction *tmp;
3318 tmp = thread->transaction_stack;
3319 if (tmp->to_thread != thread) {
3320 spin_lock(&tmp->lock);
3321 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3322 proc->pid, thread->pid, tmp->debug_id,
3323 tmp->to_proc ? tmp->to_proc->pid : 0,
3325 tmp->to_thread->pid : 0);
3326 spin_unlock(&tmp->lock);
3327 binder_inner_proc_unlock(proc);
3328 return_error = BR_FAILED_REPLY;
3329 return_error_param = -EPROTO;
3330 return_error_line = __LINE__;
3331 goto err_bad_call_stack;
3334 struct binder_thread *from;
3336 spin_lock(&tmp->lock);
3338 if (from && from->proc == target_proc) {
3339 atomic_inc(&from->tmp_ref);
3340 target_thread = from;
3341 spin_unlock(&tmp->lock);
3344 spin_unlock(&tmp->lock);
3345 tmp = tmp->from_parent;
3348 binder_inner_proc_unlock(proc);
3351 e->to_thread = target_thread->pid;
3352 e->to_proc = target_proc->pid;
3354 /* TODO: reuse incoming transaction for reply */
3355 t = kzalloc(sizeof(*t), GFP_KERNEL);
3357 return_error = BR_FAILED_REPLY;
3358 return_error_param = -ENOMEM;
3359 return_error_line = __LINE__;
3360 goto err_alloc_t_failed;
3362 INIT_LIST_HEAD(&t->fd_fixups);
3363 binder_stats_created(BINDER_STAT_TRANSACTION);
3364 spin_lock_init(&t->lock);
3366 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3367 if (tcomplete == NULL) {
3368 return_error = BR_FAILED_REPLY;
3369 return_error_param = -ENOMEM;
3370 return_error_line = __LINE__;
3371 goto err_alloc_tcomplete_failed;
3373 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3375 t->debug_id = t_debug_id;
3378 binder_debug(BINDER_DEBUG_TRANSACTION,
3379 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3380 proc->pid, thread->pid, t->debug_id,
3381 target_proc->pid, target_thread->pid,
3382 (u64)tr->data.ptr.buffer,
3383 (u64)tr->data.ptr.offsets,
3384 (u64)tr->data_size, (u64)tr->offsets_size,
3385 (u64)extra_buffers_size);
3387 binder_debug(BINDER_DEBUG_TRANSACTION,
3388 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3389 proc->pid, thread->pid, t->debug_id,
3390 target_proc->pid, target_node->debug_id,
3391 (u64)tr->data.ptr.buffer,
3392 (u64)tr->data.ptr.offsets,
3393 (u64)tr->data_size, (u64)tr->offsets_size,
3394 (u64)extra_buffers_size);
3396 if (!reply && !(tr->flags & TF_ONE_WAY))
3397 t->from = thread;
3398 else
3399 t->from = NULL;
3400 t->sender_euid = task_euid(proc->tsk);
3401 t->to_proc = target_proc;
3402 t->to_thread = target_thread;
3404 t->flags = tr->flags;
3405 t->priority = task_nice(current);
3407 if (target_node && target_node->txn_security_ctx) {
3411 security_cred_getsecid(proc->cred, &secid);
3412 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3414 return_error = BR_FAILED_REPLY;
3415 return_error_param = ret;
3416 return_error_line = __LINE__;
3417 goto err_get_secctx_failed;
3419 added_size = ALIGN(secctx_sz, sizeof(u64));
3420 extra_buffers_size += added_size;
3421 if (extra_buffers_size < added_size) {
3422 /* integer overflow of extra_buffers_size */
3423 return_error = BR_FAILED_REPLY;
3424 return_error_param = -EINVAL;
3425 return_error_line = __LINE__;
3426 goto err_bad_extra_size;
3430 trace_binder_transaction(reply, t, target_node);
3432 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3433 tr->offsets_size, extra_buffers_size,
3434 !reply && (t->flags & TF_ONE_WAY), current->tgid);
3435 if (IS_ERR(t->buffer)) {
3437 * -ESRCH indicates VMA cleared. The target is dying.
3439 return_error_param = PTR_ERR(t->buffer);
3440 return_error = return_error_param == -ESRCH ?
3441 BR_DEAD_REPLY : BR_FAILED_REPLY;
3442 return_error_line = __LINE__;
3444 goto err_binder_alloc_buf_failed;
3448 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3449 ALIGN(tr->offsets_size, sizeof(void *)) +
3450 ALIGN(extra_buffers_size, sizeof(void *)) -
3451 ALIGN(secctx_sz, sizeof(u64));
3453 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3454 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3455 t->buffer, buf_offset,
3458 t->security_ctx = 0;
3461 security_release_secctx(secctx, secctx_sz);
3464 t->buffer->debug_id = t->debug_id;
3465 t->buffer->transaction = t;
3466 t->buffer->target_node = target_node;
3467 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3468 trace_binder_transaction_alloc_buf(t->buffer);
3470 if (binder_alloc_copy_user_to_buffer(
3471 &target_proc->alloc,
3472 t->buffer,
3473 ALIGN(tr->data_size, sizeof(void *)),
3474 (const void __user *)
3475 (uintptr_t)tr->data.ptr.offsets,
3476 tr->offsets_size)) {
3477 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3478 proc->pid, thread->pid);
3479 return_error = BR_FAILED_REPLY;
3480 return_error_param = -EFAULT;
3481 return_error_line = __LINE__;
3482 goto err_copy_data_failed;
3484 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3485 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3486 proc->pid, thread->pid, (u64)tr->offsets_size);
3487 return_error = BR_FAILED_REPLY;
3488 return_error_param = -EINVAL;
3489 return_error_line = __LINE__;
3490 goto err_bad_offset;
3492 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3493 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3494 proc->pid, thread->pid,
3495 (u64)extra_buffers_size);
3496 return_error = BR_FAILED_REPLY;
3497 return_error_param = -EINVAL;
3498 return_error_line = __LINE__;
3499 goto err_bad_offset;
3501 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3502 buffer_offset = off_start_offset;
3503 off_end_offset = off_start_offset + tr->offsets_size;
3504 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3505 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3506 ALIGN(secctx_sz, sizeof(u64));
3507 off_min = 0;
3508 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3509 buffer_offset += sizeof(binder_size_t)) {
3510 struct binder_object_header *hdr;
3512 struct binder_object object;
3513 binder_size_t object_offset;
3514 binder_size_t copy_size;
3516 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3520 sizeof(object_offset))) {
3521 return_error = BR_FAILED_REPLY;
3522 return_error_param = -EINVAL;
3523 return_error_line = __LINE__;
3524 goto err_bad_offset;
3528 * Copy the source user buffer up to the next object
3529 * that will be processed.
3531 copy_size = object_offset - user_offset;
3532 if (copy_size && (user_offset > object_offset ||
3533 binder_alloc_copy_user_to_buffer(
3534 &target_proc->alloc,
3535 t->buffer, user_offset,
3536 user_buffer + user_offset,
3538 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3539 proc->pid, thread->pid);
3540 return_error = BR_FAILED_REPLY;
3541 return_error_param = -EFAULT;
3542 return_error_line = __LINE__;
3543 goto err_copy_data_failed;
3545 object_size = binder_get_object(target_proc, user_buffer,
3546 t->buffer, object_offset, &object);
3547 if (object_size == 0 || object_offset < off_min) {
3548 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3549 proc->pid, thread->pid,
3550 (u64)object_offset,
3551 (u64)off_min,
3552 (u64)t->buffer->data_size);
3553 return_error = BR_FAILED_REPLY;
3554 return_error_param = -EINVAL;
3555 return_error_line = __LINE__;
3556 goto err_bad_offset;
3557 }
3558 /*
3559 * Set offset to the next buffer fragment to be
3560 * copied
3561 */
3562 user_offset = object_offset + object_size;
3563
3564 hdr = &object.hdr;
3565 off_min = object_offset + object_size;
3566 switch (hdr->type) {
3567 case BINDER_TYPE_BINDER:
3568 case BINDER_TYPE_WEAK_BINDER: {
3569 struct flat_binder_object *fp;
3571 fp = to_flat_binder_object(hdr);
3572 ret = binder_translate_binder(fp, t, thread);
3575 binder_alloc_copy_to_buffer(&target_proc->alloc,
3579 return_error = BR_FAILED_REPLY;
3580 return_error_param = ret;
3581 return_error_line = __LINE__;
3582 goto err_translate_failed;
3585 case BINDER_TYPE_HANDLE:
3586 case BINDER_TYPE_WEAK_HANDLE: {
3587 struct flat_binder_object *fp;
3589 fp = to_flat_binder_object(hdr);
3590 ret = binder_translate_handle(fp, t, thread);
3592 binder_alloc_copy_to_buffer(&target_proc->alloc,
3596 return_error = BR_FAILED_REPLY;
3597 return_error_param = ret;
3598 return_error_line = __LINE__;
3599 goto err_translate_failed;
3603 case BINDER_TYPE_FD: {
3604 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3605 binder_size_t fd_offset = object_offset +
3606 (uintptr_t)&fp->fd - (uintptr_t)fp;
3607 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3608 thread, in_reply_to);
3612 binder_alloc_copy_to_buffer(&target_proc->alloc,
3616 return_error = BR_FAILED_REPLY;
3617 return_error_param = ret;
3618 return_error_line = __LINE__;
3619 goto err_translate_failed;
3622 case BINDER_TYPE_FDA: {
3623 struct binder_object ptr_object;
3624 binder_size_t parent_offset;
3625 struct binder_object user_object;
3626 size_t user_parent_size;
3627 struct binder_fd_array_object *fda =
3628 to_binder_fd_array_object(hdr);
3629 size_t num_valid = (buffer_offset - off_start_offset) /
3630 sizeof(binder_size_t);
3631 struct binder_buffer_object *parent =
3632 binder_validate_ptr(target_proc, t->buffer,
3633 &ptr_object, fda->parent,
3638 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3639 proc->pid, thread->pid);
3640 return_error = BR_FAILED_REPLY;
3641 return_error_param = -EINVAL;
3642 return_error_line = __LINE__;
3643 goto err_bad_parent;
3645 if (!binder_validate_fixup(target_proc, t->buffer,
3650 last_fixup_min_off)) {
3651 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3652 proc->pid, thread->pid);
3653 return_error = BR_FAILED_REPLY;
3654 return_error_param = -EINVAL;
3655 return_error_line = __LINE__;
3656 goto err_bad_parent;
3659 * We need to read the user version of the parent
3660 * object to get the original user offset
3663 binder_get_object(proc, user_buffer, t->buffer,
3664 parent_offset, &user_object);
3665 if (user_parent_size != sizeof(user_object.bbo)) {
3666 binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3667 proc->pid, thread->pid,
3669 sizeof(user_object.bbo));
3670 return_error = BR_FAILED_REPLY;
3671 return_error_param = -EINVAL;
3672 return_error_line = __LINE__;
3673 goto err_bad_parent;
3675 ret = binder_translate_fd_array(&pf_head, fda,
3676 user_buffer, parent,
3677 &user_object.bbo, t,
3678 thread, in_reply_to);
3680 ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3685 return_error = BR_FAILED_REPLY;
3686 return_error_param = ret > 0 ? -EINVAL : ret;
3687 return_error_line = __LINE__;
3688 goto err_translate_failed;
3690 last_fixup_obj_off = parent_offset;
3691 last_fixup_min_off =
3692 fda->parent_offset + sizeof(u32) * fda->num_fds;
3694 case BINDER_TYPE_PTR: {
3695 struct binder_buffer_object *bp =
3696 to_binder_buffer_object(hdr);
3697 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3700 if (bp->length > buf_left) {
3701 binder_user_error("%d:%d got transaction with too large buffer\n",
3702 proc->pid, thread->pid);
3703 return_error = BR_FAILED_REPLY;
3704 return_error_param = -EINVAL;
3705 return_error_line = __LINE__;
3706 goto err_bad_offset;
3708 ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3709 (const void __user *)(uintptr_t)bp->buffer,
3712 return_error = BR_FAILED_REPLY;
3713 return_error_param = ret;
3714 return_error_line = __LINE__;
3715 goto err_translate_failed;
3717 /* Fixup buffer pointer to target proc address space */
3718 bp->buffer = (uintptr_t)
3719 t->buffer->user_data + sg_buf_offset;
3720 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3722 num_valid = (buffer_offset - off_start_offset) /
3723 sizeof(binder_size_t);
3724 ret = binder_fixup_parent(&pf_head, t,
3729 last_fixup_min_off);
3731 binder_alloc_copy_to_buffer(&target_proc->alloc,
3735 return_error = BR_FAILED_REPLY;
3736 return_error_param = ret;
3737 return_error_line = __LINE__;
3738 goto err_translate_failed;
3740 last_fixup_obj_off = object_offset;
3741 last_fixup_min_off = 0;
3744 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3745 proc->pid, thread->pid, hdr->type);
3746 return_error = BR_FAILED_REPLY;
3747 return_error_param = -EINVAL;
3748 return_error_line = __LINE__;
3749 goto err_bad_object_type;
3752 /* Done processing objects, copy the rest of the buffer */
3753 if (binder_alloc_copy_user_to_buffer(
3754 &target_proc->alloc,
3755 t->buffer, user_offset,
3756 user_buffer + user_offset,
3757 tr->data_size - user_offset)) {
3758 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3759 proc->pid, thread->pid);
3760 return_error = BR_FAILED_REPLY;
3761 return_error_param = -EFAULT;
3762 return_error_line = __LINE__;
3763 goto err_copy_data_failed;
3766 ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3767 &sgc_head, &pf_head);
3769 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3770 proc->pid, thread->pid);
3771 return_error = BR_FAILED_REPLY;
3772 return_error_param = ret;
3773 return_error_line = __LINE__;
3774 goto err_copy_data_failed;
3776 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3777 t->work.type = BINDER_WORK_TRANSACTION;
3780 binder_enqueue_thread_work(thread, tcomplete);
3781 binder_inner_proc_lock(target_proc);
3782 if (target_thread->is_dead) {
3783 binder_inner_proc_unlock(target_proc);
3784 goto err_dead_proc_or_thread;
3786 BUG_ON(t->buffer->async_transaction != 0);
3787 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3788 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3789 binder_inner_proc_unlock(target_proc);
3790 wake_up_interruptible_sync(&target_thread->wait);
3791 binder_free_transaction(in_reply_to);
3792 } else if (!(t->flags & TF_ONE_WAY)) {
3793 BUG_ON(t->buffer->async_transaction != 0);
3794 binder_inner_proc_lock(proc);
3796 * Defer the TRANSACTION_COMPLETE, so we don't return to
3797 * userspace immediately; this allows the target process to
3798 * immediately start processing this transaction, reducing
3799 * latency. We will then return the TRANSACTION_COMPLETE when
3800 * the target replies (or there is an error).
3802 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3804 t->from_parent = thread->transaction_stack;
3805 thread->transaction_stack = t;
3806 binder_inner_proc_unlock(proc);
3807 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3808 binder_inner_proc_lock(proc);
3809 binder_pop_transaction_ilocked(thread, t);
3810 binder_inner_proc_unlock(proc);
3811 goto err_dead_proc_or_thread;
3814 BUG_ON(target_node == NULL);
3815 BUG_ON(t->buffer->async_transaction != 1);
3816 binder_enqueue_thread_work(thread, tcomplete);
3817 if (!binder_proc_transaction(t, target_proc, NULL))
3818 goto err_dead_proc_or_thread;
3821 binder_thread_dec_tmpref(target_thread);
3822 binder_proc_dec_tmpref(target_proc);
3824 binder_dec_node_tmpref(target_node);
3825 /*
3826 * write barrier to synchronize with initialization
3827 * of log entry
3828 */
3829 smp_wmb();
3830 WRITE_ONCE(e->debug_id_done, t_debug_id);
3831 return;
3833 err_dead_proc_or_thread:
3834 return_error = BR_DEAD_REPLY;
3835 return_error_line = __LINE__;
3836 binder_dequeue_work(proc, tcomplete);
3837 err_translate_failed:
3838 err_bad_object_type:
3841 err_copy_data_failed:
3842 binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3843 binder_free_txn_fixups(t);
3844 trace_binder_transaction_failed_buffer_release(t->buffer);
3845 binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3846 buffer_offset, true);
3848 binder_dec_node_tmpref(target_node);
3850 t->buffer->transaction = NULL;
3851 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3852 err_binder_alloc_buf_failed:
3855 security_release_secctx(secctx, secctx_sz);
3856 err_get_secctx_failed:
3858 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3859 err_alloc_tcomplete_failed:
3861 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3865 err_empty_call_stack:
3867 err_invalid_target_handle:
3869 binder_thread_dec_tmpref(target_thread);
3871 binder_proc_dec_tmpref(target_proc);
3873 binder_dec_node(target_node, 1, 0);
3874 binder_dec_node_tmpref(target_node);
3877 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3878 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3879 proc->pid, thread->pid, return_error, return_error_param,
3880 (u64)tr->data_size, (u64)tr->offsets_size,
3884 struct binder_transaction_log_entry *fe;
3886 e->return_error = return_error;
3887 e->return_error_param = return_error_param;
3888 e->return_error_line = return_error_line;
3889 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3890 *fe = *e;
3891 /*
3892 * write barrier to synchronize with initialization
3893 * of log entry
3894 */
3895 smp_wmb();
3896 WRITE_ONCE(e->debug_id_done, t_debug_id);
3897 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3898 }
3900 BUG_ON(thread->return_error.cmd != BR_OK);
3901 if (in_reply_to) {
3902 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3903 binder_enqueue_thread_work(thread, &thread->return_error.work);
3904 binder_send_failed_reply(in_reply_to, return_error);
3905 } else {
3906 thread->return_error.cmd = return_error;
3907 binder_enqueue_thread_work(thread, &thread->return_error.work);
3908 }
3909 }
3912 * binder_free_buf() - free the specified buffer
3913 * @proc: binder proc that owns buffer
3914 * @buffer: buffer to be freed
3915 * @is_failure: failed to send transaction
3917 * If the buffer is for an async transaction, enqueue the next async
3918 * transaction from the node.
3920 * Cleanup buffer and free it.
3923 binder_free_buf(struct binder_proc *proc,
3924 struct binder_thread *thread,
3925 struct binder_buffer *buffer, bool is_failure)
3927 binder_inner_proc_lock(proc);
3928 if (buffer->transaction) {
3929 buffer->transaction->buffer = NULL;
3930 buffer->transaction = NULL;
3932 binder_inner_proc_unlock(proc);
3933 if (buffer->async_transaction && buffer->target_node) {
3934 struct binder_node *buf_node;
3935 struct binder_work *w;
3937 buf_node = buffer->target_node;
3938 binder_node_inner_lock(buf_node);
3939 BUG_ON(!buf_node->has_async_transaction);
3940 BUG_ON(buf_node->proc != proc);
3941 w = binder_dequeue_work_head_ilocked(
3942 &buf_node->async_todo);
3943 if (!w) {
3944 buf_node->has_async_transaction = false;
3945 } else {
3946 binder_enqueue_work_ilocked(
3947 w, &proc->todo);
3948 binder_wakeup_proc_ilocked(proc);
3949 }
3950 binder_node_inner_unlock(buf_node);
3952 trace_binder_transaction_buffer_release(buffer);
3953 binder_release_entire_buffer(proc, thread, buffer, is_failure);
3954 binder_alloc_free_buf(&proc->alloc, buffer);
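/*
 * This is the other half of the oneway throttling in
 * binder_proc_transaction(): for example, async transaction A is
 * delivered while the node is idle; async transaction B then arrives
 * and parks on node->async_todo; when the target frees A's buffer via
 * BC_FREE_BUFFER, the code above moves B to proc->todo and wakes the
 * process, so at most one async transaction per node is in flight.
 */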
3957 static int binder_thread_write(struct binder_proc *proc,
3958 struct binder_thread *thread,
3959 binder_uintptr_t binder_buffer, size_t size,
3960 binder_size_t *consumed)
3963 struct binder_context *context = proc->context;
3964 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3965 void __user *ptr = buffer + *consumed;
3966 void __user *end = buffer + size;
3968 while (ptr < end && thread->return_error.cmd == BR_OK) {
3971 if (get_user(cmd, (uint32_t __user *)ptr))
3972 return -EFAULT;
3973 ptr += sizeof(uint32_t);
3974 trace_binder_command(cmd);
3975 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3976 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3977 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3978 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3986 const char *debug_string;
3987 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3988 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3989 struct binder_ref_data rdata;
3991 if (get_user(target, (uint32_t __user *)ptr))
3994 ptr += sizeof(uint32_t);
3996 if (increment && !target) {
3997 struct binder_node *ctx_mgr_node;
3998 mutex_lock(&context->context_mgr_node_lock);
3999 ctx_mgr_node = context->binder_context_mgr_node;
4001 if (ctx_mgr_node->proc == proc) {
4002 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
4003 proc->pid, thread->pid);
4004 mutex_unlock(&context->context_mgr_node_lock);
4007 ret = binder_inc_ref_for_node(
4009 strong, NULL, &rdata);
4011 mutex_unlock(&context->context_mgr_node_lock);
4013 if (ret)
4014 ret = binder_update_ref_for_handle(
4015 proc, target, increment, strong,
4016 &rdata);
4017 if (!ret && rdata.desc != target) {
4018 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
4019 proc->pid, thread->pid,
4020 target, rdata.desc);
4024 debug_string = "IncRefs";
4027 debug_string = "Acquire";
4030 debug_string = "Release";
4034 debug_string = "DecRefs";
4038 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
4039 proc->pid, thread->pid, debug_string,
4040 strong, target, ret);
4043 binder_debug(BINDER_DEBUG_USER_REFS,
4044 "%d:%d %s ref %d desc %d s %d w %d\n",
4045 proc->pid, thread->pid, debug_string,
4046 rdata.debug_id, rdata.desc, rdata.strong,
4050 case BC_INCREFS_DONE:
4051 case BC_ACQUIRE_DONE: {
4052 binder_uintptr_t node_ptr;
4053 binder_uintptr_t cookie;
4054 struct binder_node *node;
4057 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
4058 return -EFAULT;
4059 ptr += sizeof(binder_uintptr_t);
4060 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4061 return -EFAULT;
4062 ptr += sizeof(binder_uintptr_t);
4063 node = binder_get_node(proc, node_ptr);
4064 if (!node) {
4065 binder_user_error("%d:%d %s u%016llx no match\n",
4066 proc->pid, thread->pid,
4067 cmd == BC_INCREFS_DONE ?
4068 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4069 (u64)node_ptr);
4070 break;
4071 }
4073 if (cookie != node->cookie) {
4074 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
4075 proc->pid, thread->pid,
4076 cmd == BC_INCREFS_DONE ?
4077 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4078 (u64)node_ptr, node->debug_id,
4079 (u64)cookie, (u64)node->cookie);
4080 binder_put_node(node);
4083 binder_node_inner_lock(node);
4084 if (cmd == BC_ACQUIRE_DONE) {
4085 if (node->pending_strong_ref == 0) {
4086 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
4087 proc->pid, thread->pid,
4089 binder_node_inner_unlock(node);
4090 binder_put_node(node);
4093 node->pending_strong_ref = 0;
4095 if (node->pending_weak_ref == 0) {
4096 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
4097 proc->pid, thread->pid,
4099 binder_node_inner_unlock(node);
4100 binder_put_node(node);
4103 node->pending_weak_ref = 0;
4105 free_node = binder_dec_node_nilocked(node,
4106 cmd == BC_ACQUIRE_DONE, 0);
4108 binder_debug(BINDER_DEBUG_USER_REFS,
4109 "%d:%d %s node %d ls %d lw %d tr %d\n",
4110 proc->pid, thread->pid,
4111 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4112 node->debug_id, node->local_strong_refs,
4113 node->local_weak_refs, node->tmp_refs);
4114 binder_node_inner_unlock(node);
4115 binder_put_node(node);
4118 case BC_ATTEMPT_ACQUIRE:
4119 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
4121 case BC_ACQUIRE_RESULT:
4122 pr_err("BC_ACQUIRE_RESULT not supported\n");
4125 case BC_FREE_BUFFER: {
4126 binder_uintptr_t data_ptr;
4127 struct binder_buffer *buffer;
4129 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
4131 ptr += sizeof(binder_uintptr_t);
4133 buffer = binder_alloc_prepare_to_free(&proc->alloc,
4135 if (IS_ERR_OR_NULL(buffer)) {
4136 if (PTR_ERR(buffer) == -EPERM) {
4138 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
4139 proc->pid, thread->pid,
4143 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
4144 proc->pid, thread->pid,
4149 binder_debug(BINDER_DEBUG_FREE_BUFFER,
4150 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
4151 proc->pid, thread->pid, (u64)data_ptr,
4153 buffer->transaction ? "active" : "finished");
4154 binder_free_buf(proc, thread, buffer, false);
4158 case BC_TRANSACTION_SG:
4160 struct binder_transaction_data_sg tr;
4162 if (copy_from_user(&tr, ptr, sizeof(tr)))
4165 binder_transaction(proc, thread, &tr.transaction_data,
4166 cmd == BC_REPLY_SG, tr.buffers_size);
4169 case BC_TRANSACTION:
4171 struct binder_transaction_data tr;
4173 if (copy_from_user(&tr, ptr, sizeof(tr)))
4176 binder_transaction(proc, thread, &tr,
4177 cmd == BC_REPLY, 0);
4181 case BC_REGISTER_LOOPER:
4182 binder_debug(BINDER_DEBUG_THREADS,
4183 "%d:%d BC_REGISTER_LOOPER\n",
4184 proc->pid, thread->pid);
4185 binder_inner_proc_lock(proc);
4186 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4187 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4188 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4189 proc->pid, thread->pid);
4190 } else if (proc->requested_threads == 0) {
4191 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4192 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4193 proc->pid, thread->pid);
4195 proc->requested_threads--;
4196 proc->requested_threads_started++;
4198 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4199 binder_inner_proc_unlock(proc);
4201 case BC_ENTER_LOOPER:
4202 binder_debug(BINDER_DEBUG_THREADS,
4203 "%d:%d BC_ENTER_LOOPER\n",
4204 proc->pid, thread->pid);
4205 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4206 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4207 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4208 proc->pid, thread->pid);
4210 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4212 case BC_EXIT_LOOPER:
4213 binder_debug(BINDER_DEBUG_THREADS,
4214 "%d:%d BC_EXIT_LOOPER\n",
4215 proc->pid, thread->pid);
4216 thread->looper |= BINDER_LOOPER_STATE_EXITED;
4219 case BC_REQUEST_DEATH_NOTIFICATION:
4220 case BC_CLEAR_DEATH_NOTIFICATION: {
4222 binder_uintptr_t cookie;
4223 struct binder_ref *ref;
4224 struct binder_ref_death *death = NULL;
4226 if (get_user(target, (uint32_t __user *)ptr))
4228 ptr += sizeof(uint32_t);
4229 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4231 ptr += sizeof(binder_uintptr_t);
4232 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4234 * Allocate memory for death notification
4235 * before taking lock
4237 death = kzalloc(sizeof(*death), GFP_KERNEL);
4238 if (death == NULL) {
4239 WARN_ON(thread->return_error.cmd !=
4241 thread->return_error.cmd = BR_ERROR;
4242 binder_enqueue_thread_work(
4244 &thread->return_error.work);
4246 BINDER_DEBUG_FAILED_TRANSACTION,
4247 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4248 proc->pid, thread->pid);
4252 binder_proc_lock(proc);
4253 ref = binder_get_ref_olocked(proc, target, false);
4255 binder_user_error("%d:%d %s invalid ref %d\n",
4256 proc->pid, thread->pid,
4257 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4258 "BC_REQUEST_DEATH_NOTIFICATION" :
4259 "BC_CLEAR_DEATH_NOTIFICATION",
4261 binder_proc_unlock(proc);
4266 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4267 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4268 proc->pid, thread->pid,
4269 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4270 "BC_REQUEST_DEATH_NOTIFICATION" :
4271 "BC_CLEAR_DEATH_NOTIFICATION",
4272 (u64)cookie, ref->data.debug_id,
4273 ref->data.desc, ref->data.strong,
4274 ref->data.weak, ref->node->debug_id);
4276 binder_node_lock(ref->node);
4277 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4279 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4280 proc->pid, thread->pid);
4281 binder_node_unlock(ref->node);
4282 binder_proc_unlock(proc);
4286 binder_stats_created(BINDER_STAT_DEATH);
4287 INIT_LIST_HEAD(&death->work.entry);
4288 death->cookie = cookie;
4290 if (ref->node->proc == NULL) {
4291 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4293 binder_inner_proc_lock(proc);
4294 binder_enqueue_work_ilocked(
4295 &ref->death->work, &proc->todo);
4296 binder_wakeup_proc_ilocked(proc);
4297 binder_inner_proc_unlock(proc);
4300 if (ref->death == NULL) {
4301 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4302 proc->pid, thread->pid);
4303 binder_node_unlock(ref->node);
4304 binder_proc_unlock(proc);
4308 if (death->cookie != cookie) {
4309 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4310 proc->pid, thread->pid,
4313 binder_node_unlock(ref->node);
4314 binder_proc_unlock(proc);
4318 binder_inner_proc_lock(proc);
4319 if (list_empty(&death->work.entry)) {
4320 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4321 if (thread->looper &
4322 (BINDER_LOOPER_STATE_REGISTERED |
4323 BINDER_LOOPER_STATE_ENTERED))
4324 binder_enqueue_thread_work_ilocked(
4328 binder_enqueue_work_ilocked(
4331 binder_wakeup_proc_ilocked(
4335 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4336 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4338 binder_inner_proc_unlock(proc);
4340 binder_node_unlock(ref->node);
4341 binder_proc_unlock(proc);
4343 case BC_DEAD_BINDER_DONE: {
4344 struct binder_work *w;
4345 binder_uintptr_t cookie;
4346 struct binder_ref_death *death = NULL;
4348 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4351 ptr += sizeof(cookie);
4352 binder_inner_proc_lock(proc);
4353 list_for_each_entry(w, &proc->delivered_death,
4355 struct binder_ref_death *tmp_death =
4357 struct binder_ref_death,
4360 if (tmp_death->cookie == cookie) {
4365 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4366 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4367 proc->pid, thread->pid, (u64)cookie,
4369 if (death == NULL) {
4370 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4371 proc->pid, thread->pid, (u64)cookie);
4372 binder_inner_proc_unlock(proc);
4375 binder_dequeue_work_ilocked(&death->work);
4376 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4377 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4378 if (thread->looper &
4379 (BINDER_LOOPER_STATE_REGISTERED |
4380 BINDER_LOOPER_STATE_ENTERED))
4381 binder_enqueue_thread_work_ilocked(
4382 thread, &death->work);
4384 binder_enqueue_work_ilocked(
4387 binder_wakeup_proc_ilocked(proc);
4390 binder_inner_proc_unlock(proc);
4394 pr_err("%d:%d unknown command %d\n",
4395 proc->pid, thread->pid, cmd);
4398 *consumed = ptr - buffer;
4403 static void binder_stat_br(struct binder_proc *proc,
4404 struct binder_thread *thread, uint32_t cmd)
4406 trace_binder_return(cmd);
4407 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4408 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4409 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4410 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4414 static int binder_put_node_cmd(struct binder_proc *proc,
4415 struct binder_thread *thread,
4417 binder_uintptr_t node_ptr,
4418 binder_uintptr_t node_cookie,
4420 uint32_t cmd, const char *cmd_name)
4422 void __user *ptr = *ptrp;
4424 if (put_user(cmd, (uint32_t __user *)ptr))
4426 ptr += sizeof(uint32_t);
4428 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4430 ptr += sizeof(binder_uintptr_t);
4432 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4434 ptr += sizeof(binder_uintptr_t);
4436 binder_stat_br(proc, thread, cmd);
4437 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4438 proc->pid, thread->pid, cmd_name, node_debug_id,
4439 (u64)node_ptr, (u64)node_cookie);
4445 static int binder_wait_for_work(struct binder_thread *thread,
4449 struct binder_proc *proc = thread->proc;
4452 freezer_do_not_count();
4453 binder_inner_proc_lock(proc);
4455 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4456 if (binder_has_work_ilocked(thread, do_proc_work))
4459 list_add(&thread->waiting_thread_node,
4460 &proc->waiting_threads);
4461 binder_inner_proc_unlock(proc);
4463 binder_inner_proc_lock(proc);
4464 list_del_init(&thread->waiting_thread_node);
4465 if (signal_pending(current)) {
4470 finish_wait(&thread->wait, &wait);
4471 binder_inner_proc_unlock(proc);
4478 * binder_apply_fd_fixups() - finish fd translation
4479 * @proc: binder_proc associated @t->buffer
4480 * @t: binder transaction with list of fd fixups
4482 * Now that we are in the context of the transaction target
4483 * process, we can allocate and install fds. Process the
4484 * list of fds to translate and fixup the buffer with the
4487 * If we fail to allocate an fd, then free the resources by
4488 * fput'ing files that have not been processed and ksys_close'ing
4489 * any fds that have already been allocated.
4491 static int binder_apply_fd_fixups(struct binder_proc *proc,
4492 struct binder_transaction *t)
4494 struct binder_txn_fd_fixup *fixup, *tmp;
4497 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4498 int fd = get_unused_fd_flags(O_CLOEXEC);
4501 binder_debug(BINDER_DEBUG_TRANSACTION,
4502 "failed fd fixup txn %d fd %d\n",
4507 binder_debug(BINDER_DEBUG_TRANSACTION,
4508 "fd fixup txn %d fd %d\n",
4510 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4511 fd_install(fd, fixup->file);
4513 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4520 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4527 err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
4533 binder_deferred_fd_close(fd);
4535 list_del(&fixup->fixup_entry);
4542 static int binder_thread_read(struct binder_proc *proc,
4543 struct binder_thread *thread,
4544 binder_uintptr_t binder_buffer, size_t size,
4545 binder_size_t *consumed, int non_block)
4547 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4548 void __user *ptr = buffer + *consumed;
4549 void __user *end = buffer + size;
4552 int wait_for_proc_work;
4554 if (*consumed == 0) {
4555 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4557 ptr += sizeof(uint32_t);
4561 binder_inner_proc_lock(proc);
4562 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4563 binder_inner_proc_unlock(proc);
4565 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4567 trace_binder_wait_for_work(wait_for_proc_work,
4568 !!thread->transaction_stack,
4569 !binder_worklist_empty(proc, &thread->todo));
4570 if (wait_for_proc_work) {
4571 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4572 BINDER_LOOPER_STATE_ENTERED))) {
4573 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4574 proc->pid, thread->pid, thread->looper);
4575 wait_event_interruptible(binder_user_error_wait,
4576 binder_stop_on_user_error < 2);
4578 binder_set_nice(proc->default_priority);
4582 if (!binder_has_work(thread, wait_for_proc_work))
4585 ret = binder_wait_for_work(thread, wait_for_proc_work);
4588 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4595 struct binder_transaction_data_secctx tr;
4596 struct binder_transaction_data *trd = &tr.transaction_data;
4597 struct binder_work *w = NULL;
4598 struct list_head *list = NULL;
4599 struct binder_transaction *t = NULL;
4600 struct binder_thread *t_from;
4601 size_t trsize = sizeof(*trd);
4603 binder_inner_proc_lock(proc);
4604 if (!binder_worklist_empty_ilocked(&thread->todo))
4605 list = &thread->todo;
4606 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4610 binder_inner_proc_unlock(proc);
4613 if (ptr - buffer == 4 && !thread->looper_need_return)
4618 if (end - ptr < sizeof(tr) + 4) {
4619 binder_inner_proc_unlock(proc);
4622 w = binder_dequeue_work_head_ilocked(list);
4623 if (binder_worklist_empty_ilocked(&thread->todo))
4624 thread->process_todo = false;
4627 case BINDER_WORK_TRANSACTION: {
4628 binder_inner_proc_unlock(proc);
4629 t = container_of(w, struct binder_transaction, work);
4631 case BINDER_WORK_RETURN_ERROR: {
4632 struct binder_error *e = container_of(
4633 w, struct binder_error, work);
4635 WARN_ON(e->cmd == BR_OK);
4636 binder_inner_proc_unlock(proc);
4637 if (put_user(e->cmd, (uint32_t __user *)ptr))
4641 ptr += sizeof(uint32_t);
4643 binder_stat_br(proc, thread, cmd);
4645 case BINDER_WORK_TRANSACTION_COMPLETE: {
4646 binder_inner_proc_unlock(proc);
4647 cmd = BR_TRANSACTION_COMPLETE;
4649 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4650 if (put_user(cmd, (uint32_t __user *)ptr))
4652 ptr += sizeof(uint32_t);
4654 binder_stat_br(proc, thread, cmd);
4655 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4656 "%d:%d BR_TRANSACTION_COMPLETE\n",
4657 proc->pid, thread->pid);
4659 case BINDER_WORK_NODE: {
4660 struct binder_node *node = container_of(w, struct binder_node, work);
4662 binder_uintptr_t node_ptr = node->ptr;
4663 binder_uintptr_t node_cookie = node->cookie;
4664 int node_debug_id = node->debug_id;
4667 void __user *orig_ptr = ptr;
4669 BUG_ON(proc != node->proc);
4670 strong = node->internal_strong_refs ||
4671 node->local_strong_refs;
4672 weak = !hlist_empty(&node->refs) ||
4673 node->local_weak_refs ||
4674 node->tmp_refs || strong;
4675 has_strong_ref = node->has_strong_ref;
4676 has_weak_ref = node->has_weak_ref;
4678 if (weak && !has_weak_ref) {
4679 node->has_weak_ref = 1;
4680 node->pending_weak_ref = 1;
4681 node->local_weak_refs++;
4683 if (strong && !has_strong_ref) {
4684 node->has_strong_ref = 1;
4685 node->pending_strong_ref = 1;
4686 node->local_strong_refs++;
4688 if (!strong && has_strong_ref)
4689 node->has_strong_ref = 0;
4690 if (!weak && has_weak_ref)
4691 node->has_weak_ref = 0;
4692 if (!weak && !strong) {
4693 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4694 "%d:%d node %d u%016llx c%016llx deleted\n",
4695 proc->pid, thread->pid,
4699 rb_erase(&node->rb_node, &proc->nodes);
4700 binder_inner_proc_unlock(proc);
4701 binder_node_lock(node);
4703 * Acquire the node lock before freeing the
4704 * node to serialize with other threads that
4705 * may have been holding the node lock while
4706 * decrementing this node (avoids race where
4707 * this thread frees while the other thread
4708 * is unlocking the node after the final
4711 binder_node_unlock(node);
4712 binder_free_node(node);
4714 binder_inner_proc_unlock(proc);
4716 if (weak && !has_weak_ref)
4717 ret = binder_put_node_cmd(
4718 proc, thread, &ptr, node_ptr,
4719 node_cookie, node_debug_id,
4720 BR_INCREFS, "BR_INCREFS");
4721 if (!ret && strong && !has_strong_ref)
4722 ret = binder_put_node_cmd(
4723 proc, thread, &ptr, node_ptr,
4724 node_cookie, node_debug_id,
4725 BR_ACQUIRE, "BR_ACQUIRE");
4726 if (!ret && !strong && has_strong_ref)
4727 ret = binder_put_node_cmd(
4728 proc, thread, &ptr, node_ptr,
4729 node_cookie, node_debug_id,
4730 BR_RELEASE, "BR_RELEASE");
4731 if (!ret && !weak && has_weak_ref)
4732 ret = binder_put_node_cmd(
4733 proc, thread, &ptr, node_ptr,
4734 node_cookie, node_debug_id,
4735 BR_DECREFS, "BR_DECREFS");
4736 if (orig_ptr == ptr)
4737 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4738 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4739 proc->pid, thread->pid,
4746 case BINDER_WORK_DEAD_BINDER:
4747 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4748 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4749 struct binder_ref_death *death;
4751 binder_uintptr_t cookie;
4753 death = container_of(w, struct binder_ref_death, work);
4754 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4755 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4757 cmd = BR_DEAD_BINDER;
4758 cookie = death->cookie;
4760 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4761 "%d:%d %s %016llx\n",
4762 proc->pid, thread->pid,
4763 cmd == BR_DEAD_BINDER ?
4765 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4767 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4768 binder_inner_proc_unlock(proc);
4770 binder_stats_deleted(BINDER_STAT_DEATH);
4772 binder_enqueue_work_ilocked(
4773 w, &proc->delivered_death);
4774 binder_inner_proc_unlock(proc);
4776 if (put_user(cmd, (uint32_t __user *)ptr))
4778 ptr += sizeof(uint32_t);
4779 if (put_user(cookie,
4780 (binder_uintptr_t __user *)ptr))
4782 ptr += sizeof(binder_uintptr_t);
4783 binder_stat_br(proc, thread, cmd);
4784 if (cmd == BR_DEAD_BINDER)
4785 goto done; /* DEAD_BINDER notifications can cause transactions */
4788 binder_inner_proc_unlock(proc);
4789 pr_err("%d:%d: bad work type %d\n",
4790 proc->pid, thread->pid, w->type);
4797 BUG_ON(t->buffer == NULL);
4798 if (t->buffer->target_node) {
4799 struct binder_node *target_node = t->buffer->target_node;
4801 trd->target.ptr = target_node->ptr;
4802 trd->cookie = target_node->cookie;
4803 t->saved_priority = task_nice(current);
4804 if (t->priority < target_node->min_priority &&
4805 !(t->flags & TF_ONE_WAY))
4806 binder_set_nice(t->priority);
4807 else if (!(t->flags & TF_ONE_WAY) ||
4808 t->saved_priority > target_node->min_priority)
4809 binder_set_nice(target_node->min_priority);
4810 cmd = BR_TRANSACTION;
4812 trd->target.ptr = 0;
4816 trd->code = t->code;
4817 trd->flags = t->flags;
4818 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4820 t_from = binder_get_txn_from(t);
4822 struct task_struct *sender = t_from->proc->tsk;
4825 task_tgid_nr_ns(sender,
4826 task_active_pid_ns(current));
4828 trd->sender_pid = 0;
4831 ret = binder_apply_fd_fixups(proc, t);
4833 struct binder_buffer *buffer = t->buffer;
4834 bool oneway = !!(t->flags & TF_ONE_WAY);
4835 int tid = t->debug_id;
4838 binder_thread_dec_tmpref(t_from);
4839 buffer->transaction = NULL;
4840 binder_cleanup_transaction(t, "fd fixups failed",
4842 binder_free_buf(proc, thread, buffer, true);
4843 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4844 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4845 proc->pid, thread->pid,
4847 (cmd == BR_REPLY ? "reply " : ""),
4848 tid, BR_FAILED_REPLY, ret, __LINE__);
4849 if (cmd == BR_REPLY) {
4850 cmd = BR_FAILED_REPLY;
4851 if (put_user(cmd, (uint32_t __user *)ptr))
4853 ptr += sizeof(uint32_t);
4854 binder_stat_br(proc, thread, cmd);
4859 trd->data_size = t->buffer->data_size;
4860 trd->offsets_size = t->buffer->offsets_size;
4861 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4862 trd->data.ptr.offsets = trd->data.ptr.buffer +
4863 ALIGN(t->buffer->data_size,
4866 tr.secctx = t->security_ctx;
4867 if (t->security_ctx) {
4868 cmd = BR_TRANSACTION_SEC_CTX;
4869 trsize = sizeof(tr);
4871 if (put_user(cmd, (uint32_t __user *)ptr)) {
4873 binder_thread_dec_tmpref(t_from);
4875 binder_cleanup_transaction(t, "put_user failed",
4880 ptr += sizeof(uint32_t);
4881 if (copy_to_user(ptr, &tr, trsize)) {
4883 binder_thread_dec_tmpref(t_from);
4885 binder_cleanup_transaction(t, "copy_to_user failed",
4892 trace_binder_transaction_received(t);
4893 binder_stat_br(proc, thread, cmd);
4894 binder_debug(BINDER_DEBUG_TRANSACTION,
4895 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4896 proc->pid, thread->pid,
4897 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4898 (cmd == BR_TRANSACTION_SEC_CTX) ?
4899 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4900 t->debug_id, t_from ? t_from->proc->pid : 0,
4901 t_from ? t_from->pid : 0, cmd,
4902 t->buffer->data_size, t->buffer->offsets_size,
4903 (u64)trd->data.ptr.buffer,
4904 (u64)trd->data.ptr.offsets);
4907 binder_thread_dec_tmpref(t_from);
4908 t->buffer->allow_user_free = 1;
4909 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4910 binder_inner_proc_lock(thread->proc);
4911 t->to_parent = thread->transaction_stack;
4912 t->to_thread = thread;
4913 thread->transaction_stack = t;
4914 binder_inner_proc_unlock(thread->proc);
4916 binder_free_transaction(t);
4923 *consumed = ptr - buffer;
4924 binder_inner_proc_lock(proc);
4925 if (proc->requested_threads == 0 &&
4926 list_empty(&thread->proc->waiting_threads) &&
4927 proc->requested_threads_started < proc->max_threads &&
4928 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4929 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
4930 /*spawn a new thread if we leave this out */) {
4931 proc->requested_threads++;
4932 binder_inner_proc_unlock(proc);
4933 binder_debug(BINDER_DEBUG_THREADS,
4934 "%d:%d BR_SPAWN_LOOPER\n",
4935 proc->pid, thread->pid);
4936 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4938 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4940 binder_inner_proc_unlock(proc);
4944 static void binder_release_work(struct binder_proc *proc,
4945 struct list_head *list)
4947 struct binder_work *w;
4948 enum binder_work_type wtype;
4951 binder_inner_proc_lock(proc);
4952 w = binder_dequeue_work_head_ilocked(list);
4953 wtype = w ? w->type : 0;
4954 binder_inner_proc_unlock(proc);
4959 case BINDER_WORK_TRANSACTION: {
4960 struct binder_transaction *t;
4962 t = container_of(w, struct binder_transaction, work);
4964 binder_cleanup_transaction(t, "process died.",
4967 case BINDER_WORK_RETURN_ERROR: {
4968 struct binder_error *e = container_of(
4969 w, struct binder_error, work);
4971 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4972 "undelivered TRANSACTION_ERROR: %u\n",
4975 case BINDER_WORK_TRANSACTION_COMPLETE: {
4976 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4977 "undelivered TRANSACTION_COMPLETE\n");
4979 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4981 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4982 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4983 struct binder_ref_death *death;
4985 death = container_of(w, struct binder_ref_death, work);
4986 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4987 "undelivered death notification, %016llx\n",
4988 (u64)death->cookie);
4990 binder_stats_deleted(BINDER_STAT_DEATH);
4992 case BINDER_WORK_NODE:
4995 pr_err("unexpected work type, %d, not freed\n",
5003 static struct binder_thread *binder_get_thread_ilocked(
5004 struct binder_proc *proc, struct binder_thread *new_thread)
5006 struct binder_thread *thread = NULL;
5007 struct rb_node *parent = NULL;
5008 struct rb_node **p = &proc->threads.rb_node;
5012 thread = rb_entry(parent, struct binder_thread, rb_node);
5014 if (current->pid < thread->pid)
5016 else if (current->pid > thread->pid)
5017 p = &(*p)->rb_right;
5023 thread = new_thread;
5024 binder_stats_created(BINDER_STAT_THREAD);
5025 thread->proc = proc;
5026 thread->pid = current->pid;
5027 atomic_set(&thread->tmp_ref, 0);
5028 init_waitqueue_head(&thread->wait);
5029 INIT_LIST_HEAD(&thread->todo);
5030 rb_link_node(&thread->rb_node, parent, p);
5031 rb_insert_color(&thread->rb_node, &proc->threads);
5032 thread->looper_need_return = true;
5033 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
5034 thread->return_error.cmd = BR_OK;
5035 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
5036 thread->reply_error.cmd = BR_OK;
5037 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
5041 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
5043 struct binder_thread *thread;
5044 struct binder_thread *new_thread;
5046 binder_inner_proc_lock(proc);
5047 thread = binder_get_thread_ilocked(proc, NULL);
5048 binder_inner_proc_unlock(proc);
5050 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
5051 if (new_thread == NULL)
5053 binder_inner_proc_lock(proc);
5054 thread = binder_get_thread_ilocked(proc, new_thread);
5055 binder_inner_proc_unlock(proc);
5056 if (thread != new_thread)
5062 static void binder_free_proc(struct binder_proc *proc)
5064 struct binder_device *device;
5066 BUG_ON(!list_empty(&proc->todo));
5067 BUG_ON(!list_empty(&proc->delivered_death));
5068 device = container_of(proc->context, struct binder_device, context);
5069 if (refcount_dec_and_test(&device->ref)) {
5070 kfree(proc->context->name);
5073 binder_alloc_deferred_release(&proc->alloc);
5074 put_task_struct(proc->tsk);
5075 put_cred(proc->cred);
5076 binder_stats_deleted(BINDER_STAT_PROC);
5080 static void binder_free_thread(struct binder_thread *thread)
5082 BUG_ON(!list_empty(&thread->todo));
5083 binder_stats_deleted(BINDER_STAT_THREAD);
5084 binder_proc_dec_tmpref(thread->proc);
5088 static int binder_thread_release(struct binder_proc *proc,
5089 struct binder_thread *thread)
5091 struct binder_transaction *t;
5092 struct binder_transaction *send_reply = NULL;
5093 int active_transactions = 0;
5094 struct binder_transaction *last_t = NULL;
5096 binder_inner_proc_lock(thread->proc);
5098 * take a ref on the proc so it survives
5099 * after we remove this thread from proc->threads.
5100 * The corresponding dec is when we actually
5101 * free the thread in binder_free_thread()
5105 * take a ref on this thread to ensure it
5106 * survives while we are releasing it
5108 atomic_inc(&thread->tmp_ref);
5109 rb_erase(&thread->rb_node, &proc->threads);
5110 t = thread->transaction_stack;
5112 spin_lock(&t->lock);
5113 if (t->to_thread == thread)
5116 __acquire(&t->lock);
5118 thread->is_dead = true;
5122 active_transactions++;
5123 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5124 "release %d:%d transaction %d %s, still active\n",
5125 proc->pid, thread->pid,
5127 (t->to_thread == thread) ? "in" : "out");
5129 if (t->to_thread == thread) {
5131 t->to_thread = NULL;
5133 t->buffer->transaction = NULL;
5137 } else if (t->from == thread) {
5142 spin_unlock(&last_t->lock);
5144 spin_lock(&t->lock);
5146 __acquire(&t->lock);
5148 /* annotation for sparse, lock not acquired in last iteration above */
5149 __release(&t->lock);
5152 * If this thread used poll, make sure we remove the waitqueue from any
5153 * poll data structures holding it.
5155 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5156 wake_up_pollfree(&thread->wait);
5158 binder_inner_proc_unlock(thread->proc);
5161 * This is needed to avoid races between wake_up_pollfree() above and
5162 * someone else removing the last entry from the queue for other reasons
5163 * (e.g. ep_remove_wait_queue() being called due to an epoll file
5164 * descriptor being closed). Such other users hold an RCU read lock, so
5165 * we can be sure they're done after we call synchronize_rcu().
5167 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5171 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5172 binder_release_work(proc, &thread->todo);
5173 binder_thread_dec_tmpref(thread);
5174 return active_transactions;
5177 static __poll_t binder_poll(struct file *filp,
5178 struct poll_table_struct *wait)
5180 struct binder_proc *proc = filp->private_data;
5181 struct binder_thread *thread = NULL;
5182 bool wait_for_proc_work;
5184 thread = binder_get_thread(proc);
5188 binder_inner_proc_lock(thread->proc);
5189 thread->looper |= BINDER_LOOPER_STATE_POLL;
5190 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5192 binder_inner_proc_unlock(thread->proc);
5194 poll_wait(filp, &thread->wait, wait);
5196 if (binder_has_work(thread, wait_for_proc_work))
5202 static int binder_ioctl_write_read(struct file *filp,
5203 unsigned int cmd, unsigned long arg,
5204 struct binder_thread *thread)
5207 struct binder_proc *proc = filp->private_data;
5208 unsigned int size = _IOC_SIZE(cmd);
5209 void __user *ubuf = (void __user *)arg;
5210 struct binder_write_read bwr;
5212 if (size != sizeof(struct binder_write_read)) {
5216 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
5220 binder_debug(BINDER_DEBUG_READ_WRITE,
5221 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5222 proc->pid, thread->pid,
5223 (u64)bwr.write_size, (u64)bwr.write_buffer,
5224 (u64)bwr.read_size, (u64)bwr.read_buffer);
5226 if (bwr.write_size > 0) {
5227 ret = binder_thread_write(proc, thread,
5230 &bwr.write_consumed);
5231 trace_binder_write_done(ret);
5233 bwr.read_consumed = 0;
5234 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5239 if (bwr.read_size > 0) {
5240 ret = binder_thread_read(proc, thread, bwr.read_buffer,
5243 filp->f_flags & O_NONBLOCK);
5244 trace_binder_read_done(ret);
5245 binder_inner_proc_lock(proc);
5246 if (!binder_worklist_empty_ilocked(&proc->todo))
5247 binder_wakeup_proc_ilocked(proc);
5248 binder_inner_proc_unlock(proc);
5250 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5255 binder_debug(BINDER_DEBUG_READ_WRITE,
5256 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5257 proc->pid, thread->pid,
5258 (u64)bwr.write_consumed, (u64)bwr.write_size,
5259 (u64)bwr.read_consumed, (u64)bwr.read_size);
5260 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
5268 static int binder_ioctl_set_ctx_mgr(struct file *filp,
5269 struct flat_binder_object *fbo)
5272 struct binder_proc *proc = filp->private_data;
5273 struct binder_context *context = proc->context;
5274 struct binder_node *new_node;
5275 kuid_t curr_euid = current_euid();
5277 mutex_lock(&context->context_mgr_node_lock);
5278 if (context->binder_context_mgr_node) {
5279 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5283 ret = security_binder_set_context_mgr(proc->cred);
5286 if (uid_valid(context->binder_context_mgr_uid)) {
5287 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5288 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5289 from_kuid(&init_user_ns, curr_euid),
5290 from_kuid(&init_user_ns,
5291 context->binder_context_mgr_uid));
5296 context->binder_context_mgr_uid = curr_euid;
5298 new_node = binder_new_node(proc, fbo);
5303 binder_node_lock(new_node);
5304 new_node->local_weak_refs++;
5305 new_node->local_strong_refs++;
5306 new_node->has_strong_ref = 1;
5307 new_node->has_weak_ref = 1;
5308 context->binder_context_mgr_node = new_node;
5309 binder_node_unlock(new_node);
5310 binder_put_node(new_node);
5312 mutex_unlock(&context->context_mgr_node_lock);
5316 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5317 struct binder_node_info_for_ref *info)
5319 struct binder_node *node;
5320 struct binder_context *context = proc->context;
5321 __u32 handle = info->handle;
5323 if (info->strong_count || info->weak_count || info->reserved1 ||
5324 info->reserved2 || info->reserved3) {
5325 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5330 /* This ioctl may only be used by the context manager */
5331 mutex_lock(&context->context_mgr_node_lock);
5332 if (!context->binder_context_mgr_node ||
5333 context->binder_context_mgr_node->proc != proc) {
5334 mutex_unlock(&context->context_mgr_node_lock);
5337 mutex_unlock(&context->context_mgr_node_lock);
5339 node = binder_get_node_from_ref(proc, handle, true, NULL);
5343 info->strong_count = node->local_strong_refs +
5344 node->internal_strong_refs;
5345 info->weak_count = node->local_weak_refs;
5347 binder_put_node(node);
5352 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5353 struct binder_node_debug_info *info)
5356 binder_uintptr_t ptr = info->ptr;
5358 memset(info, 0, sizeof(*info));
5360 binder_inner_proc_lock(proc);
5361 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5362 struct binder_node *node = rb_entry(n, struct binder_node,
5364 if (node->ptr > ptr) {
5365 info->ptr = node->ptr;
5366 info->cookie = node->cookie;
5367 info->has_strong_ref = node->has_strong_ref;
5368 info->has_weak_ref = node->has_weak_ref;
5372 binder_inner_proc_unlock(proc);
5377 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5380 struct binder_proc *proc = filp->private_data;
5381 struct binder_thread *thread;
5382 unsigned int size = _IOC_SIZE(cmd);
5383 void __user *ubuf = (void __user *)arg;
5385 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
5386 proc->pid, current->pid, cmd, arg);*/
5388 binder_selftest_alloc(&proc->alloc);
5390 trace_binder_ioctl(cmd, arg);
5392 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5396 thread = binder_get_thread(proc);
5397 if (thread == NULL) {
5403 case BINDER_WRITE_READ:
5404 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
5408 case BINDER_SET_MAX_THREADS: {
5411 if (copy_from_user(&max_threads, ubuf,
5412 sizeof(max_threads))) {
5416 binder_inner_proc_lock(proc);
5417 proc->max_threads = max_threads;
5418 binder_inner_proc_unlock(proc);
5421 case BINDER_SET_CONTEXT_MGR_EXT: {
5422 struct flat_binder_object fbo;
5424 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5428 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5433 case BINDER_SET_CONTEXT_MGR:
5434 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5438 case BINDER_THREAD_EXIT:
5439 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5440 proc->pid, thread->pid);
5441 binder_thread_release(proc, thread);
5444 case BINDER_VERSION: {
5445 struct binder_version __user *ver = ubuf;
5447 if (size != sizeof(struct binder_version)) {
5451 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5452 &ver->protocol_version)) {
5458 case BINDER_GET_NODE_INFO_FOR_REF: {
5459 struct binder_node_info_for_ref info;
5461 if (copy_from_user(&info, ubuf, sizeof(info))) {
5466 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5470 if (copy_to_user(ubuf, &info, sizeof(info))) {
5477 case BINDER_GET_NODE_DEBUG_INFO: {
5478 struct binder_node_debug_info info;
5480 if (copy_from_user(&info, ubuf, sizeof(info))) {
5485 ret = binder_ioctl_get_node_debug_info(proc, &info);
5489 if (copy_to_user(ubuf, &info, sizeof(info))) {
5502 thread->looper_need_return = false;
5503 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5504 if (ret && ret != -ERESTARTSYS)
5505 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5507 trace_binder_ioctl_done(ret);
5511 static void binder_vma_open(struct vm_area_struct *vma)
5513 struct binder_proc *proc = vma->vm_private_data;
5515 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5516 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5517 proc->pid, vma->vm_start, vma->vm_end,
5518 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5519 (unsigned long)pgprot_val(vma->vm_page_prot));
5522 static void binder_vma_close(struct vm_area_struct *vma)
5524 struct binder_proc *proc = vma->vm_private_data;
5526 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5527 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5528 proc->pid, vma->vm_start, vma->vm_end,
5529 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5530 (unsigned long)pgprot_val(vma->vm_page_prot));
5531 binder_alloc_vma_close(&proc->alloc);
5534 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5536 return VM_FAULT_SIGBUS;
5539 static const struct vm_operations_struct binder_vm_ops = {
5540 .open = binder_vma_open,
5541 .close = binder_vma_close,
5542 .fault = binder_vm_fault,
5545 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5547 struct binder_proc *proc = filp->private_data;
5549 if (proc->tsk != current->group_leader)
5552 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5553 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5554 __func__, proc->pid, vma->vm_start, vma->vm_end,
5555 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5556 (unsigned long)pgprot_val(vma->vm_page_prot));
5558 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5559 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5560 proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5563 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5564 vma->vm_flags &= ~VM_MAYWRITE;
5566 vma->vm_ops = &binder_vm_ops;
5567 vma->vm_private_data = proc;
5569 return binder_alloc_mmap_handler(&proc->alloc, vma);
5572 static int binder_open(struct inode *nodp, struct file *filp)
5574 struct binder_proc *proc, *itr;
5575 struct binder_device *binder_dev;
5576 struct binderfs_info *info;
5577 struct dentry *binder_binderfs_dir_entry_proc = NULL;
5578 bool existing_pid = false;
5580 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5581 current->group_leader->pid, current->pid);
5583 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5586 spin_lock_init(&proc->inner_lock);
5587 spin_lock_init(&proc->outer_lock);
5588 get_task_struct(current->group_leader);
5589 proc->tsk = current->group_leader;
5590 proc->cred = get_cred(filp->f_cred);
5591 INIT_LIST_HEAD(&proc->todo);
5592 proc->default_priority = task_nice(current);
5593 /* binderfs stashes devices in i_private */
5594 if (is_binderfs_device(nodp)) {
5595 binder_dev = nodp->i_private;
5596 info = nodp->i_sb->s_fs_info;
5597 binder_binderfs_dir_entry_proc = info->proc_log_dir;
5599 binder_dev = container_of(filp->private_data,
5600 struct binder_device, miscdev);
5602 refcount_inc(&binder_dev->ref);
5603 proc->context = &binder_dev->context;
5604 binder_alloc_init(&proc->alloc);
5606 binder_stats_created(BINDER_STAT_PROC);
5607 proc->pid = current->group_leader->pid;
5608 INIT_LIST_HEAD(&proc->delivered_death);
5609 INIT_LIST_HEAD(&proc->waiting_threads);
5610 filp->private_data = proc;
5612 mutex_lock(&binder_procs_lock);
5613 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5614 if (itr->pid == proc->pid) {
5615 existing_pid = true;
5619 hlist_add_head(&proc->proc_node, &binder_procs);
5620 mutex_unlock(&binder_procs_lock);
5622 if (binder_debugfs_dir_entry_proc && !existing_pid) {
5625 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5627 * proc debug entries are shared between contexts.
5628 * Only create for the first PID to avoid debugfs log spamming
5629 * The printing code will anyway print all contexts for a given
5630 * PID so this is not a problem.
5632 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5633 binder_debugfs_dir_entry_proc,
5634 (void *)(unsigned long)proc->pid,
5638 if (binder_binderfs_dir_entry_proc && !existing_pid) {
5640 struct dentry *binderfs_entry;
5642 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5644 * Similar to debugfs, the process specific log file is shared
5645 * between contexts. Only create for the first PID.
5646 * This is ok since same as debugfs, the log file will contain
5647 * information on all contexts of a given PID.
5649 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5650 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5651 if (!IS_ERR(binderfs_entry)) {
5652 proc->binderfs_entry = binderfs_entry;
5656 error = PTR_ERR(binderfs_entry);
5657 pr_warn("Unable to create file %s in binderfs (error %d)\n",
5665 static int binder_flush(struct file *filp, fl_owner_t id)
5667 struct binder_proc *proc = filp->private_data;
5669 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5674 static void binder_deferred_flush(struct binder_proc *proc)
5679 binder_inner_proc_lock(proc);
5680 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5681 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5683 thread->looper_need_return = true;
5684 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5685 wake_up_interruptible(&thread->wait);
5689 binder_inner_proc_unlock(proc);
5691 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5692 "binder_flush: %d woke %d threads\n", proc->pid,
5696 static int binder_release(struct inode *nodp, struct file *filp)
5698 struct binder_proc *proc = filp->private_data;
5700 debugfs_remove(proc->debugfs_entry);
5702 if (proc->binderfs_entry) {
5703 binderfs_remove_file(proc->binderfs_entry);
5704 proc->binderfs_entry = NULL;
5707 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5712 static int binder_node_release(struct binder_node *node, int refs)
5714 struct binder_ref *ref;
5716 struct binder_proc *proc = node->proc;
5718 binder_release_work(proc, &node->async_todo);
5720 binder_node_lock(node);
5721 binder_inner_proc_lock(proc);
5722 binder_dequeue_work_ilocked(&node->work);
5724 * The caller must have taken a temporary ref on the node,
5726 BUG_ON(!node->tmp_refs);
5727 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5728 binder_inner_proc_unlock(proc);
5729 binder_node_unlock(node);
5730 binder_free_node(node);
5736 node->local_strong_refs = 0;
5737 node->local_weak_refs = 0;
5738 binder_inner_proc_unlock(proc);
5740 spin_lock(&binder_dead_nodes_lock);
5741 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5742 spin_unlock(&binder_dead_nodes_lock);
5744 hlist_for_each_entry(ref, &node->refs, node_entry) {
5747 * Need the node lock to synchronize
5748 * with new notification requests and the
5749 * inner lock to synchronize with queued
5750 * death notifications.
5752 binder_inner_proc_lock(ref->proc);
5754 binder_inner_proc_unlock(ref->proc);
5760 BUG_ON(!list_empty(&ref->death->work.entry));
5761 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5762 binder_enqueue_work_ilocked(&ref->death->work,
5764 binder_wakeup_proc_ilocked(ref->proc);
5765 binder_inner_proc_unlock(ref->proc);
5768 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5769 "node %d now dead, refs %d, death %d\n",
5770 node->debug_id, refs, death);
5771 binder_node_unlock(node);
5772 binder_put_node(node);
5777 static void binder_deferred_release(struct binder_proc *proc)
5779 struct binder_context *context = proc->context;
5781 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5783 mutex_lock(&binder_procs_lock);
5784 hlist_del(&proc->proc_node);
5785 mutex_unlock(&binder_procs_lock);
5787 mutex_lock(&context->context_mgr_node_lock);
5788 if (context->binder_context_mgr_node &&
5789 context->binder_context_mgr_node->proc == proc) {
5790 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5791 "%s: %d context_mgr_node gone\n",
5792 __func__, proc->pid);
5793 context->binder_context_mgr_node = NULL;
5795 mutex_unlock(&context->context_mgr_node_lock);
5796 binder_inner_proc_lock(proc);
5798 * Make sure proc stays alive after we
5799 * remove all the threads
5803 proc->is_dead = true;
5805 active_transactions = 0;
5806 while ((n = rb_first(&proc->threads))) {
5807 struct binder_thread *thread;
5809 thread = rb_entry(n, struct binder_thread, rb_node);
5810 binder_inner_proc_unlock(proc);
5812 active_transactions += binder_thread_release(proc, thread);
5813 binder_inner_proc_lock(proc);
5818 while ((n = rb_first(&proc->nodes))) {
5819 struct binder_node *node;
5821 node = rb_entry(n, struct binder_node, rb_node);
5824 * take a temporary ref on the node before
5825 * calling binder_node_release() which will either
5826 * kfree() the node or call binder_put_node()
5828 binder_inc_node_tmpref_ilocked(node);
5829 rb_erase(&node->rb_node, &proc->nodes);
5830 binder_inner_proc_unlock(proc);
5831 incoming_refs = binder_node_release(node, incoming_refs);
5832 binder_inner_proc_lock(proc);
5834 binder_inner_proc_unlock(proc);
5837 binder_proc_lock(proc);
5838 while ((n = rb_first(&proc->refs_by_desc))) {
5839 struct binder_ref *ref;
5841 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5843 binder_cleanup_ref_olocked(ref);
5844 binder_proc_unlock(proc);
5845 binder_free_ref(ref);
5846 binder_proc_lock(proc);
5848 binder_proc_unlock(proc);
5850 binder_release_work(proc, &proc->todo);
5851 binder_release_work(proc, &proc->delivered_death);
5853 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5854 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5855 __func__, proc->pid, threads, nodes, incoming_refs,
5856 outgoing_refs, active_transactions);
5858 binder_proc_dec_tmpref(proc);
5861 static void binder_deferred_func(struct work_struct *work)
5863 struct binder_proc *proc;
5868 mutex_lock(&binder_deferred_lock);
5869 if (!hlist_empty(&binder_deferred_list)) {
5870 proc = hlist_entry(binder_deferred_list.first,
5871 struct binder_proc, deferred_work_node);
5872 hlist_del_init(&proc->deferred_work_node);
5873 defer = proc->deferred_work;
5874 proc->deferred_work = 0;
5879 mutex_unlock(&binder_deferred_lock);
5881 if (defer & BINDER_DEFERRED_FLUSH)
5882 binder_deferred_flush(proc);
5884 if (defer & BINDER_DEFERRED_RELEASE)
5885 binder_deferred_release(proc); /* frees proc */
5888 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5891 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5893 mutex_lock(&binder_deferred_lock);
5894 proc->deferred_work |= defer;
5895 if (hlist_unhashed(&proc->deferred_work_node)) {
5896 hlist_add_head(&proc->deferred_work_node,
5897 &binder_deferred_list);
5898 schedule_work(&binder_deferred_work);
5900 mutex_unlock(&binder_deferred_lock);
5903 static void print_binder_transaction_ilocked(struct seq_file *m,
5904 struct binder_proc *proc,
5906 struct binder_transaction *t)
5908 struct binder_proc *to_proc;
5909 struct binder_buffer *buffer = t->buffer;
5911 spin_lock(&t->lock);
5912 to_proc = t->to_proc;
5914 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5915 prefix, t->debug_id, t,
5916 t->from ? t->from->proc->pid : 0,
5917 t->from ? t->from->pid : 0,
5918 to_proc ? to_proc->pid : 0,
5919 t->to_thread ? t->to_thread->pid : 0,
5920 t->code, t->flags, t->priority, t->need_reply);
5921 spin_unlock(&t->lock);
5923 if (proc != to_proc) {
5925 * Can only safely deref buffer if we are holding the
5926 * correct proc inner lock for this node
5932 if (buffer == NULL) {
5933 seq_puts(m, " buffer free\n");
5936 if (buffer->target_node)
5937 seq_printf(m, " node %d", buffer->target_node->debug_id);
5938 seq_printf(m, " size %zd:%zd data %pK\n",
5939 buffer->data_size, buffer->offsets_size,
5943 static void print_binder_work_ilocked(struct seq_file *m,
5944 struct binder_proc *proc,
5946 const char *transaction_prefix,
5947 struct binder_work *w)
5949 struct binder_node *node;
5950 struct binder_transaction *t;
5953 case BINDER_WORK_TRANSACTION:
5954 t = container_of(w, struct binder_transaction, work);
5955 print_binder_transaction_ilocked(
5956 m, proc, transaction_prefix, t);
5958 case BINDER_WORK_RETURN_ERROR: {
5959 struct binder_error *e = container_of(
5960 w, struct binder_error, work);
5962 seq_printf(m, "%stransaction error: %u\n",
5965 case BINDER_WORK_TRANSACTION_COMPLETE:
5966 seq_printf(m, "%stransaction complete\n", prefix);
5968 case BINDER_WORK_NODE:
5969 node = container_of(w, struct binder_node, work);
5970 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5971 prefix, node->debug_id,
5972 (u64)node->ptr, (u64)node->cookie);
5974 case BINDER_WORK_DEAD_BINDER:
5975 seq_printf(m, "%shas dead binder\n", prefix);
5977 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5978 seq_printf(m, "%shas cleared dead binder\n", prefix);
5980 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5981 seq_printf(m, "%shas cleared death notification\n", prefix);
5984 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5989 static void print_binder_thread_ilocked(struct seq_file *m,
5990 struct binder_thread *thread,
5993 struct binder_transaction *t;
5994 struct binder_work *w;
5995 size_t start_pos = m->count;
5998 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
5999 thread->pid, thread->looper,
6000 thread->looper_need_return,
6001 atomic_read(&thread->tmp_ref));
6002 header_pos = m->count;
6003 t = thread->transaction_stack;
6005 if (t->from == thread) {
6006 print_binder_transaction_ilocked(m, thread->proc,
6007 " outgoing transaction", t);
6009 } else if (t->to_thread == thread) {
6010 print_binder_transaction_ilocked(m, thread->proc,
6011 " incoming transaction", t);
6014 print_binder_transaction_ilocked(m, thread->proc,
6015 " bad transaction", t);
6019 list_for_each_entry(w, &thread->todo, entry) {
6020 print_binder_work_ilocked(m, thread->proc, " ",
6021 " pending transaction", w);
6023 if (!print_always && m->count == header_pos)
6024 m->count = start_pos;
6027 static void print_binder_node_nilocked(struct seq_file *m,
6028 struct binder_node *node)
6030 struct binder_ref *ref;
6031 struct binder_work *w;
6035 hlist_for_each_entry(ref, &node->refs, node_entry)
6038 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6039 node->debug_id, (u64)node->ptr, (u64)node->cookie,
6040 node->has_strong_ref, node->has_weak_ref,
6041 node->local_strong_refs, node->local_weak_refs,
6042 node->internal_strong_refs, count, node->tmp_refs);
6044 seq_puts(m, " proc");
6045 hlist_for_each_entry(ref, &node->refs, node_entry)
6046 seq_printf(m, " %d", ref->proc->pid);
6050 list_for_each_entry(w, &node->async_todo, entry)
6051 print_binder_work_ilocked(m, node->proc, " ",
6052 " pending async transaction", w);
6056 static void print_binder_ref_olocked(struct seq_file *m,
6057 struct binder_ref *ref)
6059 binder_node_lock(ref->node);
6060 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
6061 ref->data.debug_id, ref->data.desc,
6062 ref->node->proc ? "" : "dead ",
6063 ref->node->debug_id, ref->data.strong,
6064 ref->data.weak, ref->death);
6065 binder_node_unlock(ref->node);
6068 static void print_binder_proc(struct seq_file *m,
6069 struct binder_proc *proc, int print_all)
6071 struct binder_work *w;
6073 size_t start_pos = m->count;
6075 struct binder_node *last_node = NULL;
6077 seq_printf(m, "proc %d\n", proc->pid);
6078 seq_printf(m, "context %s\n", proc->context->name);
6079 header_pos = m->count;
6081 binder_inner_proc_lock(proc);
6082 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6083 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6084 rb_node), print_all);
6086 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
6087 struct binder_node *node = rb_entry(n, struct binder_node,
6089 if (!print_all && !node->has_async_transaction)
6093 * take a temporary reference on the node so it
6094 * survives and isn't removed from the tree
6095 * while we print it.
6097 binder_inc_node_tmpref_ilocked(node);
6098 /* Need to drop inner lock to take node lock */
6099 binder_inner_proc_unlock(proc);
6101 binder_put_node(last_node);
6102 binder_node_inner_lock(node);
6103 print_binder_node_nilocked(m, node);
6104 binder_node_inner_unlock(node);
6106 binder_inner_proc_lock(proc);
6108 binder_inner_proc_unlock(proc);
6110 binder_put_node(last_node);
6113 binder_proc_lock(proc);
6114 for (n = rb_first(&proc->refs_by_desc);
6117 print_binder_ref_olocked(m, rb_entry(n,
6120 binder_proc_unlock(proc);
6122 binder_alloc_print_allocated(m, &proc->alloc);
6123 binder_inner_proc_lock(proc);
6124 list_for_each_entry(w, &proc->todo, entry)
6125 print_binder_work_ilocked(m, proc, " ",
6126 " pending transaction", w);
6127 list_for_each_entry(w, &proc->delivered_death, entry) {
6128 seq_puts(m, " has delivered dead binder\n");
6131 binder_inner_proc_unlock(proc);
6132 if (!print_all && m->count == header_pos)
6133 m->count = start_pos;
6136 static const char * const binder_return_strings[] = {
6141 "BR_ACQUIRE_RESULT",
6143 "BR_TRANSACTION_COMPLETE",
6148 "BR_ATTEMPT_ACQUIRE",
6153 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
6157 static const char * const binder_command_strings[] = {
6160 "BC_ACQUIRE_RESULT",
6168 "BC_ATTEMPT_ACQUIRE",
6169 "BC_REGISTER_LOOPER",
6172 "BC_REQUEST_DEATH_NOTIFICATION",
6173 "BC_CLEAR_DEATH_NOTIFICATION",
6174 "BC_DEAD_BINDER_DONE",
6175 "BC_TRANSACTION_SG",
6179 static const char * const binder_objstat_strings[] = {
6186 "transaction_complete"
6189 static void print_binder_stats(struct seq_file *m, const char *prefix,
6190 struct binder_stats *stats)
6194 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6195 ARRAY_SIZE(binder_command_strings));
6196 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6197 int temp = atomic_read(&stats->bc[i]);
6200 seq_printf(m, "%s%s: %d\n", prefix,
6201 binder_command_strings[i], temp);
6204 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6205 ARRAY_SIZE(binder_return_strings));
6206 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6207 int temp = atomic_read(&stats->br[i]);
6210 seq_printf(m, "%s%s: %d\n", prefix,
6211 binder_return_strings[i], temp);
6214 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6215 ARRAY_SIZE(binder_objstat_strings));
6216 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6217 ARRAY_SIZE(stats->obj_deleted));
6218 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6219 int created = atomic_read(&stats->obj_created[i]);
6220 int deleted = atomic_read(&stats->obj_deleted[i]);
6222 if (created || deleted)
6223 seq_printf(m, "%s%s: active %d total %d\n",
6225 binder_objstat_strings[i],
6231 static void print_binder_proc_stats(struct seq_file *m,
6232 struct binder_proc *proc)
6234 struct binder_work *w;
6235 struct binder_thread *thread;
6237 int count, strong, weak, ready_threads;
6238 size_t free_async_space =
6239 binder_alloc_get_free_async_space(&proc->alloc);
6241 seq_printf(m, "proc %d\n", proc->pid);
6242 seq_printf(m, "context %s\n", proc->context->name);
6245 binder_inner_proc_lock(proc);
6246 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6249 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6252 seq_printf(m, " threads: %d\n", count);
6253 seq_printf(m, " requested threads: %d+%d/%d\n"
6254 " ready threads %d\n"
6255 " free async space %zd\n", proc->requested_threads,
6256 proc->requested_threads_started, proc->max_threads,
6260 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
6262 binder_inner_proc_unlock(proc);
6263 seq_printf(m, " nodes: %d\n", count);
6267 binder_proc_lock(proc);
6268 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
6269 struct binder_ref *ref = rb_entry(n, struct binder_ref,
6272 strong += ref->data.strong;
6273 weak += ref->data.weak;
6275 binder_proc_unlock(proc);
6276 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
6278 count = binder_alloc_get_allocated_count(&proc->alloc);
6279 seq_printf(m, " buffers: %d\n", count);
6281 binder_alloc_print_pages(m, &proc->alloc);
6284 binder_inner_proc_lock(proc);
6285 list_for_each_entry(w, &proc->todo, entry) {
6286 if (w->type == BINDER_WORK_TRANSACTION)
6289 binder_inner_proc_unlock(proc);
6290 seq_printf(m, " pending transactions: %d\n", count);
6292 print_binder_stats(m, " ", &proc->stats);
6296 int binder_state_show(struct seq_file *m, void *unused)
6298 struct binder_proc *proc;
6299 struct binder_node *node;
6300 struct binder_node *last_node = NULL;
6302 seq_puts(m, "binder state:\n");
6304 spin_lock(&binder_dead_nodes_lock);
6305 if (!hlist_empty(&binder_dead_nodes))
6306 seq_puts(m, "dead nodes:\n");
6307 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
6309 * take a temporary reference on the node so it
6310 * survives and isn't removed from the list
6311 * while we print it.
6314 spin_unlock(&binder_dead_nodes_lock);
6316 binder_put_node(last_node);
6317 binder_node_lock(node);
6318 print_binder_node_nilocked(m, node);
6319 binder_node_unlock(node);
6321 spin_lock(&binder_dead_nodes_lock);
6323 spin_unlock(&binder_dead_nodes_lock);
6325 binder_put_node(last_node);
6327 mutex_lock(&binder_procs_lock);
6328 hlist_for_each_entry(proc, &binder_procs, proc_node)
6329 print_binder_proc(m, proc, 1);
6330 mutex_unlock(&binder_procs_lock);
6335 int binder_stats_show(struct seq_file *m, void *unused)
6337 struct binder_proc *proc;
6339 seq_puts(m, "binder stats:\n");
6341 print_binder_stats(m, "", &binder_stats);
6343 mutex_lock(&binder_procs_lock);
6344 hlist_for_each_entry(proc, &binder_procs, proc_node)
6345 print_binder_proc_stats(m, proc);
6346 mutex_unlock(&binder_procs_lock);
6351 int binder_transactions_show(struct seq_file *m, void *unused)
6353 struct binder_proc *proc;
6355 seq_puts(m, "binder transactions:\n");
6356 mutex_lock(&binder_procs_lock);
6357 hlist_for_each_entry(proc, &binder_procs, proc_node)
6358 print_binder_proc(m, proc, 0);
6359 mutex_unlock(&binder_procs_lock);
static int proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}
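
/*
 * Log entries are filled in without the reader taking any lock: the
 * writer publishes e->debug_id_done last (behind a write barrier), so
 * the paired smp_rmb()s below let the reader detect an entry that was
 * still being written while it was read and flag it as "(incomplete)".
 */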
static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}
int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}
const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
	.may_pollfree = true,
};
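
/*
 * Allocates and registers one misc character device per requested name.
 * Called below for each comma-separated token of the "binder_devices"
 * module parameter (commonly "binder,hwbinder,vndbinder", though the
 * actual default comes from CONFIG_ANDROID_BINDER_DEVICES). Each device
 * gets its own binder_context and thus its own context manager.
 */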
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	refcount_set(&binder_device->ref, 1);
	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}
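
/*
 * Boot-time initialization: set up the allocator shrinker, reset both
 * transaction logs, create the debugfs tree, register the classic misc
 * devices (skipped when binderfs is configured), and initialize binderfs
 * (a no-op when CONFIG_ANDROID_BINDERFS is off).
 */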
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;
	char *device_names = NULL;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
	    strcmp(binder_devices_param, "") != 0) {
		/*
		 * Copy the module_parameter string, because we don't want to
		 * tokenize it in-place.
		 */
		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
		if (!device_names) {
			ret = -ENOMEM;
			goto err_alloc_device_names_failed;
		}

		device_tmp = device_names;
		while ((device_name = strsep(&device_tmp, ","))) {
			ret = init_binder_device(device_name);
			if (ret)
				goto err_init_binder_device_failed;
		}
	}

	ret = init_binderfs();
	if (ret)
		goto err_init_binder_device_failed;

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
	binder_alloc_shrinker_exit();

	return ret;
}

device_initcall(binder_init);
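
/*
 * Including binder_trace.h a second time with CREATE_TRACE_POINTS
 * defined emits the actual tracepoint definitions into this
 * translation unit.
 */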
#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");