// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 */
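/*
 * Illustrative sketch (not part of the driver): how the lock order and
 * the suffix convention above compose. binder_example_nilocked() is a
 * hypothetical helper; the lock wrappers are the real ones defined
 * further down in this file.
 */
#if 0	/* example only */
static void binder_example_nilocked(struct binder_node *node);

static void binder_example_lock_order(struct binder_node *node)
{
        /* 2) node->lock, then 3) proc->inner_lock -- never the reverse */
        binder_node_inner_lock(node);
        binder_example_nilocked(node);
        binder_node_inner_unlock(node);
}
#endif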
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>

#include <uapi/linux/android/binder.h>
#include <uapi/linux/android/binderfs.h>

#include <asm/cacheflush.h>

#include "binder_alloc.h"
#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);
/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
        BINDER_DEBUG_USER_ERROR             = 1U << 0,
        BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
        BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
        BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
        BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
        BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
        BINDER_DEBUG_READ_WRITE             = 1U << 6,
        BINDER_DEBUG_USER_REFS              = 1U << 7,
        BINDER_DEBUG_THREADS                = 1U << 8,
        BINDER_DEBUG_TRANSACTION            = 1U << 9,
        BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
        BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
        BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
        BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
        BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
        BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
                                         const struct kernel_param *kp)
{
        int ret;

        ret = param_set_int(val, kp);
        if (binder_stop_on_user_error < 2)
                wake_up(&binder_user_error_wait);
        return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
                  param_get_int, &binder_stop_on_user_error, 0644);

#define binder_debug(mask, x...) \
        do { \
                if (binder_debug_mask & mask) \
                        pr_info_ratelimited(x); \
        } while (0)

#define binder_user_error(x...) \
        do { \
                if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
                        pr_info_ratelimited(x); \
                if (binder_stop_on_user_error) \
                        binder_stop_on_user_error = 2; \
        } while (0)
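/*
 * Illustrative sketch (not part of the driver): typical use of the two
 * logging macros above. The debug class and the messages here are made
 * up for the example.
 */
#if 0	/* example only */
static void binder_example_log_usage(void)
{
        binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%d: example open\n",
                     current->pid);
        /* also bumps binder_stop_on_user_error when the knob is set */
        binder_user_error("%d: example bad user buffer\n", current->pid);
}
#endif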
#define to_flat_binder_object(hdr) \
        container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
        container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
        container_of(hdr, struct binder_fd_array_object, hdr)
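/*
 * Illustrative sketch (not part of the driver): objects in a transaction
 * buffer are read via their generic header first, then converted with
 * the container_of() helpers above once hdr->type is known.
 * binder_example_object_dispatch() is a hypothetical function.
 */
#if 0	/* example only */
static void binder_example_object_dispatch(struct binder_object_header *hdr)
{
        if (hdr->type == BINDER_TYPE_BINDER) {
                struct flat_binder_object *fbo = to_flat_binder_object(hdr);
                /* ... handle a node object via fbo->binder / fbo->cookie ... */
        } else if (hdr->type == BINDER_TYPE_FD) {
                struct binder_fd_object *fdo = to_binder_fd_object(hdr);
                /* ... handle a file descriptor object via fdo->fd ... */
        }
}
#endif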
enum binder_stat_types {
        BINDER_STAT_PROC,
        BINDER_STAT_THREAD,
        BINDER_STAT_NODE,
        BINDER_STAT_REF,
        BINDER_STAT_DEATH,
        BINDER_STAT_TRANSACTION,
        BINDER_STAT_TRANSACTION_COMPLETE,
        BINDER_STAT_COUNT
};

struct binder_stats {
        atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
        atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
        atomic_t obj_created[BINDER_STAT_COUNT];
        atomic_t obj_deleted[BINDER_STAT_COUNT];
};
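/*
 * Illustrative sketch (not part of the driver): the br[]/bc[] counters
 * are indexed by _IOC_NR() of the BR_ or BC_ command code, the object
 * counters by a binder_stat_types value. binder_stats (used below) is
 * the global instance declared right after this example.
 */
#if 0	/* example only */
static void binder_example_count_bc_transaction(void)
{
        atomic_inc(&binder_stats.bc[_IOC_NR(BC_TRANSACTION)]);
        atomic_inc(&binder_stats.obj_created[BINDER_STAT_TRANSACTION]);
}
#endif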
static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
        atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
        atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log binder_transaction_log;
struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
        struct binder_transaction_log *log)
{
        struct binder_transaction_log_entry *e;
        unsigned int cur = atomic_inc_return(&log->cur);

        if (cur >= ARRAY_SIZE(log->entry))
                log->full = true;
        e = &log->entry[cur % ARRAY_SIZE(log->entry)];
        WRITE_ONCE(e->debug_id_done, 0);
        /*
         * write-barrier to synchronize access to e->debug_id_done.
         * We make sure the initialized 0 value is seen before
         * the other fields are zeroed by memset().
         */
        smp_wmb();
        memset(e, 0, sizeof(*e));
        return e;
}
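/*
 * Illustrative sketch (not part of the driver): a reader can detect an
 * entry that was overwritten mid-read by sampling debug_id_done before
 * and after copying, pairing smp_rmb() with the smp_wmb() above. This
 * mirrors the approach the debugfs log printer takes; the helper name
 * here is made up.
 */
#if 0	/* example only */
static bool binder_example_read_log_entry(
                struct binder_transaction_log_entry *e,
                struct binder_transaction_log_entry *copy)
{
        int debug_id = READ_ONCE(e->debug_id_done);

        smp_rmb();
        *copy = *e;
        smp_rmb();
        /* 0 means the entry is still being written */
        return debug_id && debug_id == READ_ONCE(e->debug_id_done);
}
#endif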
/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
        struct list_head entry;

        enum binder_work_type {
                BINDER_WORK_TRANSACTION = 1,
                BINDER_WORK_TRANSACTION_COMPLETE,
                BINDER_WORK_RETURN_ERROR,
                BINDER_WORK_NODE,
                BINDER_WORK_DEAD_BINDER,
                BINDER_WORK_DEAD_BINDER_AND_CLEAR,
                BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
        } type;
};

struct binder_error {
        struct binder_work work;
        uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @txn_security_ctx:     require sender's security context
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
        int debug_id;
        spinlock_t lock;
        struct binder_work work;
        union {
                struct rb_node rb_node;
                struct hlist_node dead_node;
        };
        struct binder_proc *proc;
        struct hlist_head refs;
        int internal_strong_refs;
        int local_weak_refs;
        int local_strong_refs;
        int tmp_refs;
        binder_uintptr_t ptr;
        binder_uintptr_t cookie;
        struct {
                /*
                 * bitfield elements protected by
                 * proc inner_lock
                 */
                u8 has_strong_ref:1;
                u8 pending_strong_ref:1;
                u8 has_weak_ref:1;
                u8 pending_weak_ref:1;
        };
        struct {
                /*
                 * invariant after initialization
                 */
                u8 accept_fds:1;
                u8 txn_security_ctx:1;
                u8 min_priority;
        };
        bool has_async_transaction;
        struct list_head async_todo;
};
struct binder_ref_death {
        /**
         * @work: worklist element for death notifications
         *        (protected by inner_lock of the proc that
         *        this ref belongs to)
         */
        struct binder_work work;
        binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
        int debug_id;
        uint32_t desc;
        int strong;
        int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:            binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc:    node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node:    node for lookup by @node in proc's rb_tree
 * @node_entry:      list entry for node->refs list in target node
 *                   (protected by @node->lock)
 * @proc:            binder_proc containing ref
 * @node:            binder_node of target node. When cleaning up a
 *                   ref for deletion in binder_cleanup_ref, a non-NULL
 *                   @node indicates the node must be freed
 * @death:           pointer to death notification (ref_death) if requested
 *                   (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
        /* Lookups needed: */
        /*   node + proc => ref (transaction) */
        /*   desc + proc => ref (transaction, inc/dec ref) */
        /*   node => refs + procs (proc exit) */
        struct binder_ref_data data;
        struct rb_node rb_node_desc;
        struct rb_node rb_node_node;
        struct hlist_node node_entry;
        struct binder_proc *proc;
        struct binder_node *node;
        struct binder_ref_death *death;
};

enum binder_deferred_state {
        BINDER_DEFERRED_FLUSH = 0x01,
        BINDER_DEFERRED_RELEASE = 0x02,
};
/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @cred:                 struct cred associated with the `struct file`
 *                        in binder_open()
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notification
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 * @binderfs_entry:       process-specific binderfs log file
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
        struct hlist_node proc_node;
        struct rb_root threads;
        struct rb_root nodes;
        struct rb_root refs_by_desc;
        struct rb_root refs_by_node;
        struct list_head waiting_threads;
        int pid;
        struct task_struct *tsk;
        const struct cred *cred;
        struct hlist_node deferred_work_node;
        int deferred_work;
        bool is_dead;

        struct list_head todo;
        struct binder_stats stats;
        struct list_head delivered_death;
        int max_threads;
        int requested_threads;
        int requested_threads_started;
        int tmp_ref;
        long default_priority;
        struct dentry *debugfs_entry;
        struct binder_alloc alloc;
        struct binder_context *context;
        spinlock_t inner_lock;
        spinlock_t outer_lock;
        struct dentry *binderfs_entry;
};
enum {
        BINDER_LOOPER_STATE_REGISTERED  = 0x01,
        BINDER_LOOPER_STATE_ENTERED     = 0x02,
        BINDER_LOOPER_STATE_EXITED      = 0x04,
        BINDER_LOOPER_STATE_INVALID     = 0x08,
        BINDER_LOOPER_STATE_WAITING     = 0x10,
        BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
        struct binder_proc *proc;
        struct rb_node rb_node;
        struct list_head waiting_thread_node;
        int pid;
        int looper;              /* only modified by this thread */
        bool looper_need_return; /* can be written by other thread */
        struct binder_transaction *transaction_stack;
        struct list_head todo;
        bool process_todo;
        struct binder_error return_error;
        struct binder_error reply_error;
        wait_queue_head_t wait;
        struct binder_stats stats;
        atomic_t tmp_ref;
        bool is_dead;
};

/**
 * struct binder_txn_fd_fixup - transaction fd fixup list element
 * @fixup_entry:          list entry
 * @file:                 struct file to be associated with new fd
 * @offset:               offset in buffer data to this fixup
 *
 * List element for fd fixups in a transaction. Since file
 * descriptors need to be allocated in the context of the
 * target process, we pass each fd to be processed in this
 * struct.
 */
struct binder_txn_fd_fixup {
        struct list_head fixup_entry;
        struct file *file;
        size_t offset;
};
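/*
 * Illustrative sketch (not part of the driver, simplified from the real
 * translate-fd path): the sender's fd cannot be installed here, so a
 * fixup carrying the struct file and the buffer offset is queued on the
 * transaction and processed later in the target's context. The helper
 * name is made up.
 */
#if 0	/* example only */
static int binder_example_queue_fd_fixup(struct binder_transaction *t,
                                         int fd, size_t offset)
{
        struct binder_txn_fd_fixup *fixup;
        struct file *file = fget(fd);

        if (!file)
                return -EBADF;
        fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
        if (!fixup) {
                fput(file);
                return -ENOMEM;
        }
        fixup->file = file;
        fixup->offset = offset;
        list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
        return 0;
}
#endif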
struct binder_transaction {
        int debug_id;
        struct binder_work work;
        struct binder_thread *from;
        struct binder_transaction *from_parent;
        struct binder_proc *to_proc;
        struct binder_thread *to_thread;
        struct binder_transaction *to_parent;
        unsigned need_reply:1;
        /* unsigned is_dead:1; */ /* not used at the moment */

        struct binder_buffer *buffer;
        unsigned int code;
        unsigned int flags;
        long priority;
        long saved_priority;
        kuid_t sender_euid;
        struct list_head fd_fixups;
        binder_uintptr_t security_ctx;
        /**
         * @lock: protects @from, @to_proc, and @to_thread
         *
         * @from, @to_proc, and @to_thread can be set to NULL
         * during thread teardown
         */
        spinlock_t lock;
};

/**
 * struct binder_object - union of flat binder object types
 * @hdr:   generic object header
 * @fbo:   binder object (nodes and refs)
 * @fdo:   file descriptor object
 * @bbo:   binder buffer pointer
 * @fdao:  file descriptor array
 *
 * Used for type-independent object copies
 */
struct binder_object {
        union {
                struct binder_object_header hdr;
                struct flat_binder_object fbo;
                struct binder_fd_object fdo;
                struct binder_buffer_object bbo;
                struct binder_fd_array_object fdao;
        };
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
        __acquires(&proc->outer_lock)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
        __releases(&proc->outer_lock)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
        __acquires(&proc->inner_lock)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
        __releases(&proc->inner_lock)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&proc->inner_lock);
}
/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
        __acquires(&node->lock)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
        __releases(&node->lock)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc also acquires
 * proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
        __acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&node->lock);
        if (node->proc)
                binder_inner_proc_lock(node->proc);
        else
                /* annotation for sparse */
                __acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
        __releases(&node->lock) __releases(&node->proc->inner_lock)
{
        struct binder_proc *proc = node->proc;

        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        if (proc)
                binder_inner_proc_unlock(proc);
        else
                /* annotation for sparse */
                __release(&node->proc->inner_lock);
        spin_unlock(&node->lock);
}
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
        return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
                                  struct list_head *list)
{
        bool ret;

        binder_inner_proc_lock(proc);
        ret = binder_worklist_empty_ilocked(list);
        binder_inner_proc_unlock(proc);
        return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
                            struct list_head *target_list)
{
        BUG_ON(target_list == NULL);
        BUG_ON(work->entry.next && !list_empty(&work->entry));
        list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
                                            struct binder_work *work)
{
        WARN_ON(!list_empty(&thread->waiting_thread_node));
        binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the work.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
                                   struct binder_work *work)
{
        WARN_ON(!list_empty(&thread->waiting_thread_node));
        binder_enqueue_work_ilocked(work, &thread->todo);
        thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the work.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
                           struct binder_work *work)
{
        binder_inner_proc_lock(thread->proc);
        binder_enqueue_thread_work_ilocked(thread, work);
        binder_inner_proc_unlock(thread->proc);
}
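/*
 * Illustrative sketch (not part of the driver): the difference between
 * the two enqueue flavors above. Deferred work does not set
 * thread->process_todo, so it rides along on the next wakeup instead of
 * marking the todo list for immediate processing. The helper name and
 * parameters are made up.
 */
#if 0	/* example only */
static void binder_example_enqueue(struct binder_thread *thread,
                                   struct binder_work *work,
                                   struct binder_work *deferred_work)
{
        /* Marks the thread's todo list for processing: */
        binder_enqueue_thread_work(thread, work);

        /* Queued, but the thread may sleep past it (no process_todo): */
        binder_inner_proc_lock(thread->proc);
        binder_enqueue_deferred_thread_work_ilocked(thread, deferred_work);
        binder_inner_proc_unlock(thread->proc);
}
#endif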
static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
        list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
        binder_inner_proc_lock(proc);
        binder_dequeue_work_ilocked(work);
        binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
                                        struct list_head *list)
{
        struct binder_work *w;

        w = list_first_entry_or_null(list, struct binder_work, entry);
        if (w)
                list_del_init(&w->entry);
        return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
                                    bool do_proc_work)
{
        return thread->process_todo ||
                thread->looper_need_return ||
                (do_proc_work &&
                 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
        bool has_work;

        binder_inner_proc_lock(thread->proc);
        has_work = binder_has_work_ilocked(thread, do_proc_work);
        binder_inner_proc_unlock(thread->proc);

        return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
        return !thread->transaction_stack &&
                binder_worklist_empty_ilocked(&thread->todo) &&
                (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
                                   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
                                               bool sync)
{
        struct rb_node *n;
        struct binder_thread *thread;

        for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
                thread = rb_entry(n, struct binder_thread, rb_node);
                if (thread->looper & BINDER_LOOPER_STATE_POLL &&
                    binder_available_for_proc_work_ilocked(thread)) {
                        if (sync)
                                wake_up_interruptible_sync(&thread->wait);
                        else
                                wake_up_interruptible(&thread->wait);
                }
        }
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:       process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:      If there's a thread currently waiting for process work,
 *              returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
        struct binder_thread *thread;

        assert_spin_locked(&proc->inner_lock);
        thread = list_first_entry_or_null(&proc->waiting_threads,
                                          struct binder_thread,
                                          waiting_thread_node);

        if (thread)
                list_del_init(&thread->waiting_thread_node);

        return thread;
}
/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:       process to wake up a thread in
 * @thread:     specific thread to wake-up (may be NULL)
 * @sync:       whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
                                         struct binder_thread *thread,
                                         bool sync)
{
        assert_spin_locked(&proc->inner_lock);

        if (thread) {
                if (sync)
                        wake_up_interruptible_sync(&thread->wait);
                else
                        wake_up_interruptible(&thread->wait);
                return;
        }

        /* Didn't find a thread waiting for proc work; this can happen
         * in two scenarios:
         * 1. All threads are busy handling transactions
         *    In that case, one of those threads should call back into
         *    the kernel driver soon and pick up this work.
         * 2. Threads are using the (e)poll interface, in which case
         *    they may be blocked on the waitqueue without having been
         *    added to waiting_threads. For this case, we just iterate
         *    over all threads not handling transaction work, and
         *    wake them all up. We wake all because we don't know whether
         *    a thread that called into (e)poll is handling non-binder
         *    work currently.
         */
        binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
        struct binder_thread *thread = binder_select_thread_ilocked(proc);

        binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static void binder_set_nice(long nice)
{
        long min_nice;

        if (can_nice(current, nice)) {
                set_user_nice(current, nice);
                return;
        }
        min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
        binder_debug(BINDER_DEBUG_PRIORITY_CAP,
                     "%d: nice value %ld not allowed use %ld instead\n",
                     current->pid, nice, min_nice);
        set_user_nice(current, min_nice);
        if (min_nice <= MAX_NICE)
                return;
        binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
                                                   binder_uintptr_t ptr)
{
        struct rb_node *n = proc->nodes.rb_node;
        struct binder_node *node;

        assert_spin_locked(&proc->inner_lock);

        while (n) {
                node = rb_entry(n, struct binder_node, rb_node);

                if (ptr < node->ptr)
                        n = n->rb_left;
                else if (ptr > node->ptr)
                        n = n->rb_right;
                else {
                        /*
                         * take an implicit weak reference
                         * to ensure node stays alive until
                         * call to binder_put_node()
                         */
                        binder_inc_node_tmpref_ilocked(node);
                        return node;
                }
        }
        return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
                                           binder_uintptr_t ptr)
{
        struct binder_node *node;

        binder_inner_proc_lock(proc);
        node = binder_get_node_ilocked(proc, ptr);
        binder_inner_proc_unlock(proc);
        return node;
}
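/*
 * Illustrative sketch (not part of the driver): binder_get_node()
 * returns the node with an implicit tmp reference held, so every
 * successful lookup must be paired with binder_put_node(), defined
 * further down in this file. The helper name is made up.
 */
#if 0	/* example only */
static void binder_example_node_lookup(struct binder_proc *proc,
                                       binder_uintptr_t ptr)
{
        struct binder_node *node = binder_get_node(proc, ptr);

        if (node) {
                /* node holds a tmp ref here and cannot be freed */
                binder_put_node(node);
        }
}
#endif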
static struct binder_node *binder_init_node_ilocked(
                                                struct binder_proc *proc,
                                                struct binder_node *new_node,
                                                struct flat_binder_object *fp)
{
        struct rb_node **p = &proc->nodes.rb_node;
        struct rb_node *parent = NULL;
        struct binder_node *node;
        binder_uintptr_t ptr = fp ? fp->binder : 0;
        binder_uintptr_t cookie = fp ? fp->cookie : 0;
        __u32 flags = fp ? fp->flags : 0;

        assert_spin_locked(&proc->inner_lock);

        while (*p) {
                parent = *p;
                node = rb_entry(parent, struct binder_node, rb_node);

                if (ptr < node->ptr)
                        p = &(*p)->rb_left;
                else if (ptr > node->ptr)
                        p = &(*p)->rb_right;
                else {
                        /*
                         * A matching node is already in
                         * the rb tree. Abandon the init
                         * and return it.
                         */
                        binder_inc_node_tmpref_ilocked(node);
                        return node;
                }
        }
        node = new_node;
        binder_stats_created(BINDER_STAT_NODE);
        node->tmp_refs++;
        rb_link_node(&node->rb_node, parent, p);
        rb_insert_color(&node->rb_node, &proc->nodes);
        node->debug_id = atomic_inc_return(&binder_last_id);
        node->proc = proc;
        node->ptr = ptr;
        node->cookie = cookie;
        node->work.type = BINDER_WORK_NODE;
        node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
        node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
        node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
        spin_lock_init(&node->lock);
        INIT_LIST_HEAD(&node->work.entry);
        INIT_LIST_HEAD(&node->async_todo);
        binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                     "%d:%d node %d u%016llx c%016llx created\n",
                     proc->pid, current->pid, node->debug_id,
                     (u64)node->ptr, (u64)node->cookie);

        return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
                                           struct flat_binder_object *fp)
{
        struct binder_node *node;
        struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

        if (!new_node)
                return NULL;
        binder_inner_proc_lock(proc);
        node = binder_init_node_ilocked(proc, new_node, fp);
        binder_inner_proc_unlock(proc);
        if (node != new_node)
                /*
                 * The node was already added by another thread
                 */
                kfree(new_node);

        return node;
}

static void binder_free_node(struct binder_node *node)
{
        kfree(node);
        binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
                                    int internal,
                                    struct list_head *target_list)
{
        struct binder_proc *proc = node->proc;

        assert_spin_locked(&node->lock);
        if (proc)
                assert_spin_locked(&proc->inner_lock);
        if (strong) {
                if (internal) {
                        if (target_list == NULL &&
                            node->internal_strong_refs == 0 &&
                            !(node->proc &&
                              node == node->proc->context->binder_context_mgr_node &&
                              node->has_strong_ref)) {
                                pr_err("invalid inc strong node for %d\n",
                                       node->debug_id);
                                return -EINVAL;
                        }
                        node->internal_strong_refs++;
                } else
                        node->local_strong_refs++;
                if (!node->has_strong_ref && target_list) {
                        struct binder_thread *thread = container_of(target_list,
                                        struct binder_thread, todo);
                        binder_dequeue_work_ilocked(&node->work);
                        BUG_ON(&thread->todo != target_list);
                        binder_enqueue_deferred_thread_work_ilocked(thread,
                                                                    &node->work);
                }
        } else {
                if (!internal)
                        node->local_weak_refs++;
                if (!node->has_weak_ref && list_empty(&node->work.entry)) {
                        if (target_list == NULL) {
                                pr_err("invalid inc weak node for %d\n",
                                       node->debug_id);
                                return -EINVAL;
                        }
                        /*
                         * See comment above
                         */
                        binder_enqueue_work_ilocked(&node->work, target_list);
                }
        }
        return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
                           struct list_head *target_list)
{
        int ret;

        binder_node_inner_lock(node);
        ret = binder_inc_node_nilocked(node, strong, internal, target_list);
        binder_node_inner_unlock(node);

        return ret;
}
static bool binder_dec_node_nilocked(struct binder_node *node,
                                     int strong, int internal)
{
        struct binder_proc *proc = node->proc;

        assert_spin_locked(&node->lock);
        if (proc)
                assert_spin_locked(&proc->inner_lock);
        if (strong) {
                if (internal)
                        node->internal_strong_refs--;
                else
                        node->local_strong_refs--;
                if (node->local_strong_refs || node->internal_strong_refs)
                        return false;
        } else {
                if (!internal)
                        node->local_weak_refs--;
                if (node->local_weak_refs || node->tmp_refs ||
                    !hlist_empty(&node->refs))
                        return false;
        }

        if (proc && (node->has_strong_ref || node->has_weak_ref)) {
                if (list_empty(&node->work.entry)) {
                        binder_enqueue_work_ilocked(&node->work, &proc->todo);
                        binder_wakeup_proc_ilocked(proc);
                }
        } else {
                if (hlist_empty(&node->refs) && !node->local_strong_refs &&
                    !node->local_weak_refs && !node->tmp_refs) {
                        if (proc) {
                                binder_dequeue_work_ilocked(&node->work);
                                rb_erase(&node->rb_node, &proc->nodes);
                                binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                                             "refless node %d deleted\n",
                                             node->debug_id);
                        } else {
                                BUG_ON(!list_empty(&node->work.entry));
                                spin_lock(&binder_dead_nodes_lock);
                                /*
                                 * tmp_refs could have changed so
                                 * check it again
                                 */
                                if (node->tmp_refs) {
                                        spin_unlock(&binder_dead_nodes_lock);
                                        return false;
                                }
                                hlist_del(&node->dead_node);
                                spin_unlock(&binder_dead_nodes_lock);
                                binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                                             "dead node %d deleted\n",
                                             node->debug_id);
                        }
                        return true;
                }
        }
        return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
        bool free_node;

        binder_node_inner_lock(node);
        free_node = binder_dec_node_nilocked(node, strong, internal);
        binder_node_inner_unlock(node);
        if (free_node)
                binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
        /*
         * No call to binder_inc_node() is needed since we
         * don't need to inform userspace of any changes to
         * tmp_refs
         */
        node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:       node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
        binder_node_lock(node);
        if (node->proc)
                binder_inner_proc_lock(node->proc);
        else
                spin_lock(&binder_dead_nodes_lock);
        binder_inc_node_tmpref_ilocked(node);
        if (node->proc)
                binder_inner_proc_unlock(node->proc);
        else
                spin_unlock(&binder_dead_nodes_lock);
        binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:       node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
        bool free_node;

        binder_node_inner_lock(node);
        if (!node->proc)
                spin_lock(&binder_dead_nodes_lock);
        else
                __acquire(&binder_dead_nodes_lock);
        node->tmp_refs--;
        BUG_ON(node->tmp_refs < 0);
        if (!node->proc)
                spin_unlock(&binder_dead_nodes_lock);
        else
                __release(&binder_dead_nodes_lock);
        /*
         * Call binder_dec_node() to check if all refcounts are 0
         * and cleanup is needed. Calling with strong=0 and internal=1
         * causes no actual reference to be released in binder_dec_node().
         * If that changes, a change is needed here too.
         */
        free_node = binder_dec_node_nilocked(node, 0, 1);
        binder_node_inner_unlock(node);
        if (free_node)
                binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
        binder_dec_node_tmpref(node);
}

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
                                                 u32 desc, bool need_strong_ref)
{
        struct rb_node *n = proc->refs_by_desc.rb_node;
        struct binder_ref *ref;

        while (n) {
                ref = rb_entry(n, struct binder_ref, rb_node_desc);

                if (desc < ref->data.desc) {
                        n = n->rb_left;
                } else if (desc > ref->data.desc) {
                        n = n->rb_right;
                } else if (need_strong_ref && !ref->data.strong) {
                        binder_user_error("tried to use weak ref as strong ref\n");
                        return NULL;
                } else {
                        return ref;
                }
        }
        return NULL;
}
/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:       binder_proc that owns the ref
 * @node:       binder_node of target
 * @new_ref:    newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:      the ref for node. It is possible that another thread
 *              allocated/initialized the ref first in which case the
 *              returned ref would be different than the passed-in
 *              new_ref. new_ref must be kfree'd by the caller in
 *              this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
                                        struct binder_proc *proc,
                                        struct binder_node *node,
                                        struct binder_ref *new_ref)
{
        struct binder_context *context = proc->context;
        struct rb_node **p = &proc->refs_by_node.rb_node;
        struct rb_node *parent = NULL;
        struct binder_ref *ref;
        struct rb_node *n;

        while (*p) {
                parent = *p;
                ref = rb_entry(parent, struct binder_ref, rb_node_node);

                if (node < ref->node)
                        p = &(*p)->rb_left;
                else if (node > ref->node)
                        p = &(*p)->rb_right;
                else
                        return ref;
        }
        if (!new_ref)
                return NULL;

        binder_stats_created(BINDER_STAT_REF);
        new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
        new_ref->proc = proc;
        new_ref->node = node;
        rb_link_node(&new_ref->rb_node_node, parent, p);
        rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

        new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
        for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
                ref = rb_entry(n, struct binder_ref, rb_node_desc);
                if (ref->data.desc > new_ref->data.desc)
                        break;
                new_ref->data.desc = ref->data.desc + 1;
        }

        p = &proc->refs_by_desc.rb_node;
        while (*p) {
                parent = *p;
                ref = rb_entry(parent, struct binder_ref, rb_node_desc);

                if (new_ref->data.desc < ref->data.desc)
                        p = &(*p)->rb_left;
                else if (new_ref->data.desc > ref->data.desc)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }
        rb_link_node(&new_ref->rb_node_desc, parent, p);
        rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

        binder_node_lock(node);
        hlist_add_head(&new_ref->node_entry, &node->refs);

        binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                     "%d new ref %d desc %d for node %d\n",
                     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
                     node->debug_id);
        binder_node_unlock(node);
        return new_ref;
}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
        bool delete_node = false;

        binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                     "%d delete ref %d desc %d for node %d\n",
                     ref->proc->pid, ref->data.debug_id, ref->data.desc,
                     ref->node->debug_id);

        rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
        rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

        binder_node_inner_lock(ref->node);
        if (ref->data.strong)
                binder_dec_node_nilocked(ref->node, 1, 1);

        hlist_del(&ref->node_entry);
        delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
        binder_node_inner_unlock(ref->node);
        /*
         * Clear ref->node unless we want the caller to free the node
         */
        if (!delete_node) {
                /*
                 * The caller uses ref->node to determine
                 * whether the node needs to be freed. Clear
                 * it since the node is still alive.
                 */
                ref->node = NULL;
        }

        if (ref->death) {
                binder_debug(BINDER_DEBUG_DEAD_BINDER,
                             "%d delete ref %d desc %d has death notification\n",
                             ref->proc->pid, ref->data.debug_id,
                             ref->data.desc);
                binder_dequeue_work(ref->proc, &ref->death->work);
                binder_stats_deleted(BINDER_STAT_DEATH);
        }
        binder_stats_deleted(BINDER_STAT_REF);
}
/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
                                  struct list_head *target_list)
{
        int ret;

        if (strong) {
                if (ref->data.strong == 0) {
                        ret = binder_inc_node(ref->node, 1, 1, target_list);
                        if (ret)
                                return ret;
                }
                ref->data.strong++;
        } else {
                if (ref->data.weak == 0) {
                        ret = binder_inc_node(ref->node, 0, 1, target_list);
                        if (ret)
                                return ret;
                }
                ref->data.weak++;
        }
        return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:         ref to be decremented
 * @strong:      if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
        if (strong) {
                if (ref->data.strong == 0) {
                        binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
                                          ref->proc->pid, ref->data.debug_id,
                                          ref->data.desc, ref->data.strong,
                                          ref->data.weak);
                        return false;
                }
                ref->data.strong--;
                if (ref->data.strong == 0)
                        binder_dec_node(ref->node, strong, 1);
        } else {
                if (ref->data.weak == 0) {
                        binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
                                          ref->proc->pid, ref->data.debug_id,
                                          ref->data.desc, ref->data.strong,
                                          ref->data.weak);
                        return false;
                }
                ref->data.weak--;
        }
        if (ref->data.strong == 0 && ref->data.weak == 0) {
                binder_cleanup_ref_olocked(ref);
                return true;
        }
        return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:       proc containing the ref
 * @desc:       the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:      the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
                struct binder_proc *proc,
                u32 desc, bool need_strong_ref,
                struct binder_ref_data *rdata)
{
        struct binder_node *node;
        struct binder_ref *ref;

        binder_proc_lock(proc);
        ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
        if (!ref)
                goto err_no_ref;
        node = ref->node;
        /*
         * Take an implicit reference on the node to ensure
         * it stays alive until the call to binder_put_node()
         */
        binder_inc_node_tmpref(node);
        if (rdata)
                *rdata = ref->data;
        binder_proc_unlock(proc);

        return node;

err_no_ref:
        binder_proc_unlock(proc);
        return NULL;
}
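/*
 * Illustrative sketch (not part of the driver): looking a handle up and
 * releasing the implicit node reference once done. @rdata is optional
 * and only filled in when the caller needs the ref's id/counts. The
 * helper name is made up.
 */
#if 0	/* example only */
static int binder_example_handle_lookup(struct binder_proc *proc, u32 desc)
{
        struct binder_ref_data rdata;
        struct binder_node *node;

        node = binder_get_node_from_ref(proc, desc, true, &rdata);
        if (!node)
                return -EINVAL;
        /* ... node is pinned by a tmp ref here ... */
        binder_put_node(node);
        return 0;
}
#endif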
/**
 * binder_free_ref() - free the binder_ref
 * @ref:        ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
        if (ref->node)
                binder_free_node(ref->node);
        kfree(ref->death);
        kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:       proc containing the ref
 * @desc:       the handle associated with the ref
 * @increment:  true=inc reference, false=dec reference
 * @strong:     true=strong reference, false=weak reference
 * @rdata:      the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
                uint32_t desc, bool increment, bool strong,
                struct binder_ref_data *rdata)
{
        int ret = 0;
        struct binder_ref *ref;
        bool delete_ref = false;

        binder_proc_lock(proc);
        ref = binder_get_ref_olocked(proc, desc, strong);
        if (!ref) {
                ret = -EINVAL;
                goto err_no_ref;
        }
        if (increment)
                ret = binder_inc_ref_olocked(ref, strong, NULL);
        else
                delete_ref = binder_dec_ref_olocked(ref, strong);

        if (rdata)
                *rdata = ref->data;
        binder_proc_unlock(proc);

        if (delete_ref)
                binder_free_ref(ref);
        return ret;

err_no_ref:
        binder_proc_unlock(proc);
        return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:       proc containing the ref
 * @desc:       the handle associated with the ref
 * @strong:     true=strong reference, false=weak reference
 * @rdata:      the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
                uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
        return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}

/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:        proc containing the ref
 * @node:        target node
 * @strong:      true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:       the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
                        struct binder_node *node,
                        bool strong,
                        struct list_head *target_list,
                        struct binder_ref_data *rdata)
{
        struct binder_ref *ref;
        struct binder_ref *new_ref = NULL;
        int ret = 0;

        binder_proc_lock(proc);
        ref = binder_get_ref_for_node_olocked(proc, node, NULL);
        if (!ref) {
                binder_proc_unlock(proc);
                new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
                if (!new_ref)
                        return -ENOMEM;
                binder_proc_lock(proc);
                ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
        }
        ret = binder_inc_ref_olocked(ref, strong, target_list);
        *rdata = ref->data;
        if (ret && ref == new_ref) {
                /*
                 * Cleanup the failed reference here as the target
                 * could now be dead and have already released its
                 * references by now. Calling on the new reference
                 * with strong=0 and a tmp_refs will not decrement
                 * the node. The new_ref gets kfree'd below.
                 */
                binder_cleanup_ref_olocked(new_ref);
                ref = NULL;
        }

        binder_proc_unlock(proc);
        if (new_ref && ref != new_ref)
                /*
                 * Another thread created the ref first so
                 * free the one we allocated
                 */
                kfree(new_ref);
        return ret;
}
static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
                                           struct binder_transaction *t)
{
        BUG_ON(!target_thread);
        assert_spin_locked(&target_thread->proc->inner_lock);
        BUG_ON(target_thread->transaction_stack != t);
        BUG_ON(target_thread->transaction_stack->from != target_thread);
        target_thread->transaction_stack =
                target_thread->transaction_stack->from_parent;
        t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:     thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
        /*
         * atomic is used to protect the counter value while
         * it cannot reach zero or thread->is_dead is false
         */
        binder_inner_proc_lock(thread->proc);
        atomic_dec(&thread->tmp_ref);
        if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
                binder_inner_proc_unlock(thread->proc);
                binder_free_thread(thread);
                return;
        }
        binder_inner_proc_unlock(thread->proc);
}
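/*
 * Illustrative sketch (not part of the driver): the pairing the comment
 * above describes. binder_get_txn_from(), defined just below, pins
 * t->from with a tmp_ref; binder_thread_dec_tmpref() releases that pin
 * when the caller is done. The helper name is made up.
 */
#if 0	/* example only */
static void binder_example_use_from_thread(struct binder_transaction *t)
{
        struct binder_thread *from = binder_get_txn_from(t);

        if (from) {
                /* ... from cannot be freed while we use it here ... */
                binder_thread_dec_tmpref(from);
        }
}
#endif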
/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:       proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
        binder_inner_proc_lock(proc);
        proc->tmp_ref--;
        if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
            !proc->tmp_ref) {
                binder_inner_proc_unlock(proc);
                binder_free_proc(proc);
                return;
        }
        binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:  binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
                struct binder_transaction *t)
{
        struct binder_thread *from;

        spin_lock(&t->lock);
        from = t->from;
        if (from)
                atomic_inc(&from->tmp_ref);
        spin_unlock(&t->lock);
        return from;
}

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:  binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
                struct binder_transaction *t)
        __acquires(&t->from->proc->inner_lock)
{
        struct binder_thread *from;

        from = binder_get_txn_from(t);
        if (!from) {
                __acquire(&from->proc->inner_lock);
                return NULL;
        }
        binder_inner_proc_lock(from->proc);
        if (t->from) {
                BUG_ON(from != t->from);
                return from;
        }
        binder_inner_proc_unlock(from->proc);
        __acquire(&from->proc->inner_lock);
        binder_thread_dec_tmpref(from);
        return NULL;
}

/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:  binder transaction whose fd fixups are to be freed
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
        struct binder_txn_fd_fixup *fixup, *tmp;

        list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
                fput(fixup->file);
                list_del(&fixup->fixup_entry);
                kfree(fixup);
        }
}

static void binder_free_transaction(struct binder_transaction *t)
{
        struct binder_proc *target_proc = t->to_proc;

        if (target_proc) {
                binder_inner_proc_lock(target_proc);
                if (t->buffer)
                        t->buffer->transaction = NULL;
                binder_inner_proc_unlock(target_proc);
        }
        /*
         * If the transaction has no target_proc, then
         * t->buffer->transaction has already been cleared.
         */
        binder_free_txn_fixups(t);
        kfree(t);
        binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
static void binder_send_failed_reply(struct binder_transaction *t,
                                     uint32_t error_code)
{
        struct binder_thread *target_thread;
        struct binder_transaction *next;

        BUG_ON(t->flags & TF_ONE_WAY);
        while (1) {
                target_thread = binder_get_txn_from_and_acq_inner(t);
                if (target_thread) {
                        binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
                                     "send failed reply for transaction %d to %d:%d\n",
                                     t->debug_id,
                                     target_thread->proc->pid,
                                     target_thread->pid);

                        binder_pop_transaction_ilocked(target_thread, t);
                        if (target_thread->reply_error.cmd == BR_OK) {
                                target_thread->reply_error.cmd = error_code;
                                binder_enqueue_thread_work_ilocked(
                                        target_thread,
                                        &target_thread->reply_error.work);
                                wake_up_interruptible(&target_thread->wait);
                        } else {
                                /*
                                 * Cannot get here for normal operation, but
                                 * we can if multiple synchronous transactions
                                 * are sent without blocking for responses.
                                 * Just ignore the 2nd error in this case.
                                 */
                                pr_warn("Unexpected reply error: %u\n",
                                        target_thread->reply_error.cmd);
                        }
                        binder_inner_proc_unlock(target_thread->proc);
                        binder_thread_dec_tmpref(target_thread);
                        binder_free_transaction(t);
                        return;
                }
                __release(&target_thread->proc->inner_lock);
                next = t->from_parent;

                binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
                             "send failed reply for transaction %d, target dead\n",
                             t->debug_id);

                binder_free_transaction(t);
                if (next == NULL) {
                        binder_debug(BINDER_DEBUG_DEAD_BINDER,
                                     "reply failed, no target thread at root\n");
                        return;
                }
                t = next;
                binder_debug(BINDER_DEBUG_DEAD_BINDER,
                             "reply failed, no target thread -- retry %d\n",
                             t->debug_id);
        }
}

/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:          transaction that needs to be cleaned up
 * @reason:     reason the transaction wasn't delivered
 * @error_code: error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
                                       const char *reason,
                                       uint32_t error_code)
{
        if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
                binder_send_failed_reply(t, error_code);
        } else {
                binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
                        "undelivered transaction %d, %s\n",
                        t->debug_id, reason);
                binder_free_transaction(t);
        }
}

/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc:       binder_proc owning the buffer
 * @u:          sender's user pointer to base of buffer
 * @buffer:     binder_buffer that we're parsing.
 * @offset:     offset in the @buffer at which to validate an object.
 * @object:     struct binder_object to read into
 *
 * Copy the binder object at the given offset into @object. If @u is
 * provided then the copy is from the sender's buffer. If not, then
 * it is copied from the target's @buffer.
 *
 * Return:      If there's a valid metadata object at @offset, the
 *              size of that object. Otherwise, it returns zero. The object
 *              is read into the struct binder_object pointed to by @object.
 */
static size_t binder_get_object(struct binder_proc *proc,
                                const void __user *u,
                                struct binder_buffer *buffer,
                                unsigned long offset,
                                struct binder_object *object)
{
        size_t read_size;
        struct binder_object_header *hdr;
        size_t object_size = 0;

        read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
        if (offset > buffer->data_size || read_size < sizeof(*hdr))
                return 0;
        if (u) {
                if (copy_from_user(object, u + offset, read_size))
                        return 0;
        } else {
                if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
                                                  offset, read_size))
                        return 0;
        }

        /* Ok, now see if we read a complete object. */
        hdr = &object->hdr;
        switch (hdr->type) {
        case BINDER_TYPE_BINDER:
        case BINDER_TYPE_WEAK_BINDER:
        case BINDER_TYPE_HANDLE:
        case BINDER_TYPE_WEAK_HANDLE:
                object_size = sizeof(struct flat_binder_object);
                break;
        case BINDER_TYPE_FD:
                object_size = sizeof(struct binder_fd_object);
                break;
        case BINDER_TYPE_PTR:
                object_size = sizeof(struct binder_buffer_object);
                break;
        case BINDER_TYPE_FDA:
                object_size = sizeof(struct binder_fd_array_object);
                break;
        default:
                return 0;
        }
        if (offset <= buffer->data_size - object_size &&
            buffer->data_size >= object_size)
                return object_size;
        else
                return 0;
}
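/*
 * Illustrative sketch (not part of the driver): a caller validates an
 * object at a buffer offset before acting on it. Passing u == NULL
 * selects the copy-from-target-buffer path described above. The helper
 * name is made up.
 */
#if 0	/* example only */
static int binder_example_check_object(struct binder_proc *proc,
                                       struct binder_buffer *buffer,
                                       unsigned long offset)
{
        struct binder_object object;
        size_t object_size;

        /* u == NULL: read from the target's buffer, not the sender's */
        object_size = binder_get_object(proc, NULL, buffer, offset, &object);
        if (!object_size)
                return -EINVAL; /* no valid object header at @offset */
        if (object.hdr.type == BINDER_TYPE_FD) {
                struct binder_fd_object *fdo = to_binder_fd_object(&object.hdr);
                /* ... operate on fdo ... */
        }
        return 0;
}
#endif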
2080 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2081 * @proc: binder_proc owning the buffer
2082 * @b: binder_buffer containing the object
2083 * @object: struct binder_object to read into
2084 * @index: index in offset array at which the binder_buffer_object is
2086 * @start_offset: points to the start of the offset array
2087 * @object_offsetp: offset of @object read from @b
2088 * @num_valid: the number of valid offsets in the offset array
2090 * Return: If @index is within the valid range of the offset array
2091 * described by @start_offset and @num_valid, and if there's a valid
2092 * binder_buffer_object at the offset found in index @index
2093 * of the offset array, that object is returned. Otherwise,
2094 * %NULL is returned.
2095 * Note that the offset found in index @index itself is not
2096 * verified; this function assumes that @num_valid elements
2097 * from @start_offset were previously verified to have valid offsets.
2098 * If @object_offsetp is non-NULL, then the offset within
2099 * @b is written to it.
2101 static struct binder_buffer_object *binder_validate_ptr(
2102 struct binder_proc *proc,
2103 struct binder_buffer *b,
2104 struct binder_object *object,
2105 binder_size_t index,
2106 binder_size_t start_offset,
2107 binder_size_t *object_offsetp,
2108 binder_size_t num_valid)
2111 binder_size_t object_offset;
2112 unsigned long buffer_offset;
2114 if (index >= num_valid)
2117 buffer_offset = start_offset + sizeof(binder_size_t) * index;
2118 if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2120 sizeof(object_offset)))
2122 object_size = binder_get_object(proc, NULL, b, object_offset, object);
2123 if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
2126 *object_offsetp = object_offset;
2128 return &object->bbo;
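/*
 * Editor's note on the layout assumed above, derived from how
 * binder_transaction() computes off_start_offset further down
 * (ALIGN(tr->data_size, sizeof(void *))):
 *
 *	|<--- data_size --->|pad|<--- offsets_size --->|<- sg buffers ->|
 *	^ user_data             ^ offset array: binder_size_t entries,
 *	                          entry i holding the byte offset of
 *	                          object i within the data area
 */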
2132 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2133 * @proc: binder_proc owning the buffer
2134 * @b: transaction buffer
2135 * @objects_start_offset: offset to start of objects buffer
2136 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up
2137 * @fixup_offset: start offset in @b to fix up
2138 * @last_obj_offset: offset to last binder_buffer_object that we fixed
2139 * @last_min_offset: minimum fixup offset in object at @last_obj_offset
2141 * Return: %true if a fixup in buffer @b at offset @fixup_offset is
2144 * For safety reasons, we only allow fixups inside a buffer to happen
2145 * at increasing offsets; additionally, we only allow fixup on the last
2146 * buffer object that was verified, or one of its parents.
2148 * Example of what is allowed:
2151 * B (parent = A, offset = 0)
2152 * C (parent = A, offset = 16)
2153 * D (parent = C, offset = 0)
2154 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2156 * Examples of what is not allowed:
2158 * Decreasing offsets within the same parent:
2160 * C (parent = A, offset = 16)
2161 * B (parent = A, offset = 0) // decreasing offset within A
2163 * Referring to a parent that wasn't the last object or any of its parents:
2165 * B (parent = A, offset = 0)
2166 * C (parent = A, offset = 0)
2167 * C (parent = A, offset = 16)
2168 * D (parent = B, offset = 0) // B is not A or any of A's parents
2170 static bool binder_validate_fixup(struct binder_proc *proc,
2171 struct binder_buffer *b,
2172 binder_size_t objects_start_offset,
2173 binder_size_t buffer_obj_offset,
2174 binder_size_t fixup_offset,
2175 binder_size_t last_obj_offset,
2176 binder_size_t last_min_offset)
2178 if (!last_obj_offset) {
2179 /* No previously verified buffer object to fix up in */
2183 while (last_obj_offset != buffer_obj_offset) {
2184 unsigned long buffer_offset;
2185 struct binder_object last_object;
2186 struct binder_buffer_object *last_bbo;
2187 size_t object_size = binder_get_object(proc, NULL, b,
2190 if (object_size != sizeof(*last_bbo))
2193 last_bbo = &last_object.bbo;
2195 * Safe to retrieve the parent of last_obj, since it
2196 * was already previously verified by the driver.
2198 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2200 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
2201 buffer_offset = objects_start_offset +
2202 sizeof(binder_size_t) * last_bbo->parent;
2203 if (binder_alloc_copy_from_buffer(&proc->alloc,
2206 sizeof(last_obj_offset)))
2209 return (fixup_offset >= last_min_offset);
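/*
 * Editor's sketch (not driver code): the invariant enforced above,
 * modeled standalone. Within one parent, fixup offsets must not
 * decrease, and each accepted fixup raises the floor past the pointer
 * it patched (mirroring last_bbo->parent_offset + sizeof(uintptr_t)):
 */
#include <stdbool.h>
#include <stddef.h>

struct demo_fixup {		/* hypothetical flattened fixup record */
	size_t offset;
};

static bool demo_fixups_in_order(const struct demo_fixup *f, size_t n)
{
	size_t min_off = 0;

	for (size_t i = 0; i < n; i++) {
		if (f[i].offset < min_off)
			return false;	/* decreasing offset: rejected */
		min_off = f[i].offset + sizeof(void *);
	}
	return true;
}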
2213 * struct binder_task_work_cb - for deferred close
2215 * @twork: callback_head for task work
2218 * Structure to pass task work to be handled after
2219 * returning from binder_ioctl() via task_work_add().
2221 struct binder_task_work_cb {
2222 struct callback_head twork;
2227 * binder_do_fd_close() - close list of file descriptors
2228 * @twork: callback head for task work
2230 * It is not safe to call ksys_close() during the binder_ioctl()
2231 * function if there is a chance that binder's own file descriptor
2232 * might be closed. This is to meet the requirements for using
2233 * fdget() (see comments for __fget_light()). Therefore use
2234 * task_work_add() to schedule the close operation once we have
2235 * returned from binder_ioctl(). This function is a callback
2236 * for that mechanism and does the actual ksys_close() on the
2237 * given file descriptor.
2239 static void binder_do_fd_close(struct callback_head *twork)
2241 struct binder_task_work_cb *twcb = container_of(twork,
2242 struct binder_task_work_cb, twork);
2249 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2250 * @fd: file-descriptor to close
2252 * See comments in binder_do_fd_close(). This function is used to schedule
2253 * a file-descriptor to be closed after returning from binder_ioctl().
2255 static void binder_deferred_fd_close(int fd)
2257 struct binder_task_work_cb *twcb;
2259 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2262 init_task_work(&twcb->twork, binder_do_fd_close);
2263 __close_fd_get_file(fd, &twcb->file);
2265 task_work_add(current, &twcb->twork, true);
2270 static void binder_transaction_buffer_release(struct binder_proc *proc,
2271 struct binder_thread *thread,
2272 struct binder_buffer *buffer,
2273 binder_size_t failed_at,
2276 int debug_id = buffer->debug_id;
2277 binder_size_t off_start_offset, buffer_offset, off_end_offset;
2279 binder_debug(BINDER_DEBUG_TRANSACTION,
2280 "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2281 proc->pid, buffer->debug_id,
2282 buffer->data_size, buffer->offsets_size,
2283 (unsigned long long)failed_at);
2285 if (buffer->target_node)
2286 binder_dec_node(buffer->target_node, 1, 0);
2288 off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2289 off_end_offset = is_failure && failed_at ? failed_at :
2290 off_start_offset + buffer->offsets_size;
2291 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2292 buffer_offset += sizeof(binder_size_t)) {
2293 struct binder_object_header *hdr;
2294 size_t object_size = 0;
2295 struct binder_object object;
2296 binder_size_t object_offset;
2298 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2299 buffer, buffer_offset,
2300 sizeof(object_offset)))
2301 object_size = binder_get_object(proc, NULL, buffer,
2302 object_offset, &object);
2303 if (object_size == 0) {
2304 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2305 debug_id, (u64)object_offset, buffer->data_size);
2309 switch (hdr->type) {
2310 case BINDER_TYPE_BINDER:
2311 case BINDER_TYPE_WEAK_BINDER: {
2312 struct flat_binder_object *fp;
2313 struct binder_node *node;
2315 fp = to_flat_binder_object(hdr);
2316 node = binder_get_node(proc, fp->binder);
2318 pr_err("transaction release %d bad node %016llx\n",
2319 debug_id, (u64)fp->binder);
2322 binder_debug(BINDER_DEBUG_TRANSACTION,
2323 " node %d u%016llx\n",
2324 node->debug_id, (u64)node->ptr);
2325 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2327 binder_put_node(node);
2329 case BINDER_TYPE_HANDLE:
2330 case BINDER_TYPE_WEAK_HANDLE: {
2331 struct flat_binder_object *fp;
2332 struct binder_ref_data rdata;
2335 fp = to_flat_binder_object(hdr);
2336 ret = binder_dec_ref_for_handle(proc, fp->handle,
2337 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2340 pr_err("transaction release %d bad handle %d, ret = %d\n",
2341 debug_id, fp->handle, ret);
2344 binder_debug(BINDER_DEBUG_TRANSACTION,
2345 " ref %d desc %d\n",
2346 rdata.debug_id, rdata.desc);
2349 case BINDER_TYPE_FD: {
2351 * No need to close the file here since user-space
2352 * closes it for successfully delivered
2353 * transactions. For transactions that weren't
2354 * delivered, the new fd was never allocated so
2355 * there is no need to close it, and the fput on the
2356 * file is done when the transaction is torn
2360 case BINDER_TYPE_PTR:
2362 * Nothing to do here, this will get cleaned up when the
2363 * transaction buffer gets freed
2366 case BINDER_TYPE_FDA: {
2367 struct binder_fd_array_object *fda;
2368 struct binder_buffer_object *parent;
2369 struct binder_object ptr_object;
2370 binder_size_t fda_offset;
2372 binder_size_t fd_buf_size;
2373 binder_size_t num_valid;
2377 * The fd fixups have not been applied so no
2378 * fds need to be closed.
2383 num_valid = (buffer_offset - off_start_offset) /
2384 sizeof(binder_size_t);
2385 fda = to_binder_fd_array_object(hdr);
2386 parent = binder_validate_ptr(proc, buffer, &ptr_object,
2392 pr_err("transaction release %d bad parent offset\n",
2396 fd_buf_size = sizeof(u32) * fda->num_fds;
2397 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2398 pr_err("transaction release %d invalid number of fds (%lld)\n",
2399 debug_id, (u64)fda->num_fds);
2402 if (fd_buf_size > parent->length ||
2403 fda->parent_offset > parent->length - fd_buf_size) {
2404 /* No space for all file descriptors here. */
2405 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2406 debug_id, (u64)fda->num_fds);
2410 * the source data for binder_buffer_object is visible
2411 * to user-space and the @buffer element is the user
2412 * pointer to the buffer_object containing the fd_array.
2413 * Convert the address to an offset relative to
2414 * the base of the transaction buffer.
2417 (parent->buffer - (uintptr_t)buffer->user_data) +
2419 for (fd_index = 0; fd_index < fda->num_fds;
2423 binder_size_t offset = fda_offset +
2424 fd_index * sizeof(fd);
2426 err = binder_alloc_copy_from_buffer(
2427 &proc->alloc, &fd, buffer,
2428 offset, sizeof(fd));
2431 binder_deferred_fd_close(fd);
2433 * Need to make sure the thread goes
2434 * back to userspace to complete the
2438 thread->looper_need_return = true;
2443 pr_err("transaction release %d bad object type %x\n",
2444 debug_id, hdr->type);
2450 static int binder_translate_binder(struct flat_binder_object *fp,
2451 struct binder_transaction *t,
2452 struct binder_thread *thread)
2454 struct binder_node *node;
2455 struct binder_proc *proc = thread->proc;
2456 struct binder_proc *target_proc = t->to_proc;
2457 struct binder_ref_data rdata;
2460 node = binder_get_node(proc, fp->binder);
2462 node = binder_new_node(proc, fp);
2466 if (fp->cookie != node->cookie) {
2467 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2468 proc->pid, thread->pid, (u64)fp->binder,
2469 node->debug_id, (u64)fp->cookie,
2474 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2479 ret = binder_inc_ref_for_node(target_proc, node,
2480 fp->hdr.type == BINDER_TYPE_BINDER,
2481 &thread->todo, &rdata);
2485 if (fp->hdr.type == BINDER_TYPE_BINDER)
2486 fp->hdr.type = BINDER_TYPE_HANDLE;
2488 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2490 fp->handle = rdata.desc;
2493 trace_binder_transaction_node_to_ref(t, node, &rdata);
2494 binder_debug(BINDER_DEBUG_TRANSACTION,
2495 " node %d u%016llx -> ref %d desc %d\n",
2496 node->debug_id, (u64)node->ptr,
2497 rdata.debug_id, rdata.desc);
2499 binder_put_node(node);
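/*
 * Editor's illustration (hypothetical userspace values): what this
 * translation means on the wire. The sender embeds a pointer to one of
 * its own objects:
 *
 *	struct flat_binder_object obj = {
 *		.hdr.type = BINDER_TYPE_BINDER,
 *		.binder	  = (binder_uintptr_t)local_ptr,
 *		.cookie	  = (binder_uintptr_t)local_cookie,
 *	};
 *
 * After binder_translate_binder(), the copy the receiver reads back has
 * hdr.type == BINDER_TYPE_HANDLE and obj.handle == rdata.desc, a fresh
 * descriptor in the receiver's reference table; local_ptr and
 * local_cookie are made-up names for the sender's object and cookie.
 */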
2503 static int binder_translate_handle(struct flat_binder_object *fp,
2504 struct binder_transaction *t,
2505 struct binder_thread *thread)
2507 struct binder_proc *proc = thread->proc;
2508 struct binder_proc *target_proc = t->to_proc;
2509 struct binder_node *node;
2510 struct binder_ref_data src_rdata;
2513 node = binder_get_node_from_ref(proc, fp->handle,
2514 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2516 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2517 proc->pid, thread->pid, fp->handle);
2520 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2525 binder_node_lock(node);
2526 if (node->proc == target_proc) {
2527 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2528 fp->hdr.type = BINDER_TYPE_BINDER;
2530 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2531 fp->binder = node->ptr;
2532 fp->cookie = node->cookie;
2534 binder_inner_proc_lock(node->proc);
2536 __acquire(&node->proc->inner_lock);
2537 binder_inc_node_nilocked(node,
2538 fp->hdr.type == BINDER_TYPE_BINDER,
2541 binder_inner_proc_unlock(node->proc);
2543 __release(&node->proc->inner_lock);
2544 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2545 binder_debug(BINDER_DEBUG_TRANSACTION,
2546 " ref %d desc %d -> node %d u%016llx\n",
2547 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2549 binder_node_unlock(node);
2551 struct binder_ref_data dest_rdata;
2553 binder_node_unlock(node);
2554 ret = binder_inc_ref_for_node(target_proc, node,
2555 fp->hdr.type == BINDER_TYPE_HANDLE,
2561 fp->handle = dest_rdata.desc;
2563 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2565 binder_debug(BINDER_DEBUG_TRANSACTION,
2566 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2567 src_rdata.debug_id, src_rdata.desc,
2568 dest_rdata.debug_id, dest_rdata.desc,
2572 binder_put_node(node);
2576 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2577 struct binder_transaction *t,
2578 struct binder_thread *thread,
2579 struct binder_transaction *in_reply_to)
2581 struct binder_proc *proc = thread->proc;
2582 struct binder_proc *target_proc = t->to_proc;
2583 struct binder_txn_fd_fixup *fixup;
2586 bool target_allows_fd;
2589 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2591 target_allows_fd = t->buffer->target_node->accept_fds;
2592 if (!target_allows_fd) {
2593 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2594 proc->pid, thread->pid,
2595 in_reply_to ? "reply" : "transaction",
2598 goto err_fd_not_accepted;
2603 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2604 proc->pid, thread->pid, fd);
2608 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2615 * Add fixup record for this transaction. The allocation
2616 * of the fd in the target needs to be done from a
2619 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2625 fixup->offset = fd_offset;
2626 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2627 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2635 err_fd_not_accepted:
2640 * struct binder_ptr_fixup - data to be fixed up in target buffer
2641 * @offset: offset in target buffer to fix up
2642 * @skip_size: bytes to skip in copy (fixup will be written later)
2643 * @fixup_data: data to write at fixup offset
2646 * This is used for the pointer fixup list (pf) which is created and consumed
2647 * during binder_transaction() and is only accessed locally. No
2648 * locking is necessary.
2650 * The list is ordered by @offset.
2652 struct binder_ptr_fixup {
2653 binder_size_t offset;
2655 binder_uintptr_t fixup_data;
2656 struct list_head node;
2660 * struct binder_sg_copy - scatter-gather data to be copied
2661 * @offset: offset in target buffer
2662 * @sender_uaddr: user address in source buffer
2663 * @length: bytes to copy
2666 * This is used for the sg copy list (sgc) which is created and consumed
2667 * during binder_transaction() and is only accessed locally. No
2668 * locking is necessary.
2670 * The list is ordered by @offset.
2672 struct binder_sg_copy {
2673 binder_size_t offset;
2674 const void __user *sender_uaddr;
2676 struct list_head node;
2680 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2681 * @alloc: binder_alloc associated with @buffer
2682 * @buffer: binder buffer in target process
2683 * @sgc_head: list_head of scatter-gather copy list
2684 * @pf_head: list_head of pointer fixup list
2686 * Processes all elements of @sgc_head, applying fixups from @pf_head
2687 * and copying the scatter-gather data from the source process' user
2688 * buffer to the target's buffer. It is expected that the list creation
2689 * and processing all occurs during binder_transaction() so these lists
2690 * are only accessed in local context.
2692 * Return: 0=success, else -errno
2694 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2695 struct binder_buffer *buffer,
2696 struct list_head *sgc_head,
2697 struct list_head *pf_head)
2700 struct binder_sg_copy *sgc, *tmpsgc;
2701 struct binder_ptr_fixup *tmppf;
2702 struct binder_ptr_fixup *pf =
2703 list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2706 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2707 size_t bytes_copied = 0;
2709 while (bytes_copied < sgc->length) {
2711 size_t bytes_left = sgc->length - bytes_copied;
2712 size_t offset = sgc->offset + bytes_copied;
2715 * We copy up to the fixup (pointed to by pf)
2717 copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2719 if (!ret && copy_size)
2720 ret = binder_alloc_copy_user_to_buffer(
2723 sgc->sender_uaddr + bytes_copied,
2725 bytes_copied += copy_size;
2726 if (copy_size != bytes_left) {
2728 /* we stopped at a fixup offset */
2729 if (pf->skip_size) {
2731 * we are just skipping. This is for
2732 * BINDER_TYPE_FDA where the translated
2733 * fds will be fixed up when we get
2734 * to target context.
2736 bytes_copied += pf->skip_size;
2738 /* apply the fixup indicated by pf */
2740 ret = binder_alloc_copy_to_buffer(
2744 sizeof(pf->fixup_data));
2745 bytes_copied += sizeof(pf->fixup_data);
2747 list_del(&pf->node);
2749 pf = list_first_entry_or_null(pf_head,
2750 struct binder_ptr_fixup, node);
2753 list_del(&sgc->node);
2756 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2757 BUG_ON(pf->skip_size == 0);
2758 list_del(&pf->node);
2761 BUG_ON(!list_empty(sgc_head));
2763 return ret > 0 ? -EINVAL : ret;
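/*
 * Editor's worked example (made-up numbers): one sg block at target
 * offset 0 with length 24, and one fixup at offset 8 with skip_size 0.
 * The loop above copies sender bytes [0, 8) into the buffer, stops at
 * the fixup, writes the 8-byte fixup_data at offset 8, advances
 * bytes_copied past it, and resumes copying sender bytes [16, 24).
 * With skip_size == 8 instead, bytes [8, 16) are left untouched for a
 * BINDER_TYPE_FDA fd fixup applied later in target context.
 */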
2767 * binder_cleanup_deferred_txn_lists() - free specified lists
2768 * @sgc_head: list_head of scatter-gather copy list
2769 * @pf_head: list_head of pointer fixup list
2771 * Called to clean up @sgc_head and @pf_head if there is an
2774 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2775 struct list_head *pf_head)
2777 struct binder_sg_copy *sgc, *tmpsgc;
2778 struct binder_ptr_fixup *pf, *tmppf;
2780 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2781 list_del(&sgc->node);
2784 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2785 list_del(&pf->node);
2791 * binder_defer_copy() - queue a scatter-gather buffer for copy
2792 * @sgc_head: list_head of scatter-gather copy list
2793 * @offset: binder buffer offset in target process
2794 * @sender_uaddr: user address in source process
2795 * @length: bytes to copy
2797 * Specify a scatter-gather block to be copied. The actual copy must
2798 * be deferred until all the needed fixups are identified and queued.
2799 * Then the copy and fixups are done together so un-translated values
2800 * from the source are never visible in the target buffer.
2802 * We are guaranteed that repeated calls to this function will have
2803 * monotonically increasing @offset values so the list will naturally
2806 * Return: 0=success, else -errno
2808 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2809 const void __user *sender_uaddr, size_t length)
2811 struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2816 bc->offset = offset;
2817 bc->sender_uaddr = sender_uaddr;
2818 bc->length = length;
2819 INIT_LIST_HEAD(&bc->node);
2822 * We are guaranteed that the deferred copies are in-order
2823 * so just add to the tail.
2825 list_add_tail(&bc->node, sgc_head);
2831 * binder_add_fixup() - queue a fixup to be applied to sg copy
2832 * @pf_head: list_head of binder ptr fixup list
2833 * @offset: binder buffer offset in target process
2834 * @fixup: bytes to be copied for fixup
2835 * @skip_size: bytes to skip when copying (fixup will be applied later)
2837 * Add the specified fixup to a list ordered by @offset. When copying
2838 * the scatter-gather buffers, the fixup will be copied instead of
2839 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2840 * will be applied later (in target process context), so we just skip
2841 * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2844 * This function is called *mostly* in @offset order, but there are
2845 * exceptions. Since out-of-order inserts are relatively uncommon,
2846 * we insert the new element by searching backward from the tail of
2849 * Return: 0=success, else -errno
2851 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2852 binder_uintptr_t fixup, size_t skip_size)
2854 struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2855 struct binder_ptr_fixup *tmppf;
2860 pf->offset = offset;
2861 pf->fixup_data = fixup;
2862 pf->skip_size = skip_size;
2863 INIT_LIST_HEAD(&pf->node);
2865 /* Fixups are *mostly* added in-order, but there are some
2866 * exceptions. Look backwards through list for insertion point.
2868 list_for_each_entry_reverse(tmppf, pf_head, node) {
2869 if (tmppf->offset < pf->offset) {
2870 list_add(&pf->node, &tmppf->node);
2875 * if we get here, then the new offset is the lowest so
2876 * insert at the head
2878 list_add(&pf->node, pf_head);
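/*
 * Editor's note tying this to its callers: binder_fixup_parent() below
 * queues a real pointer fixup (fixup = bp->buffer, skip_size = 0), so
 * the translated parent pointer is written during the deferred copy,
 * while binder_translate_fd_array() queues fixup = 0 with skip_size =
 * fda->num_fds * sizeof(u32), leaving the fd slots untouched until the
 * fd fixups recorded on the transaction are applied in target context.
 */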
2882 static int binder_translate_fd_array(struct list_head *pf_head,
2883 struct binder_fd_array_object *fda,
2884 const void __user *sender_ubuffer,
2885 struct binder_buffer_object *parent,
2886 struct binder_buffer_object *sender_uparent,
2887 struct binder_transaction *t,
2888 struct binder_thread *thread,
2889 struct binder_transaction *in_reply_to)
2891 binder_size_t fdi, fd_buf_size;
2892 binder_size_t fda_offset;
2893 const void __user *sender_ufda_base;
2894 struct binder_proc *proc = thread->proc;
2897 if (fda->num_fds == 0)
2900 fd_buf_size = sizeof(u32) * fda->num_fds;
2901 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2902 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2903 proc->pid, thread->pid, (u64)fda->num_fds);
2906 if (fd_buf_size > parent->length ||
2907 fda->parent_offset > parent->length - fd_buf_size) {
2908 /* No space for all file descriptors here. */
2909 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2910 proc->pid, thread->pid, (u64)fda->num_fds);
2914 * the source data for binder_buffer_object is visible
2915 * to user-space and the @buffer element is the user
2916 * pointer to the buffer_object containing the fd_array.
2917 * Convert the address to an offset relative to
2918 * the base of the transaction buffer.
2920 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2922 sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2925 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2926 !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2927 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2928 proc->pid, thread->pid);
2931 ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2935 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2937 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2938 binder_size_t sender_uoffset = fdi * sizeof(fd);
2940 ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2942 ret = binder_translate_fd(fd, offset, t, thread,
2945 return ret > 0 ? -EINVAL : ret;
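/*
 * Editor's illustration (hypothetical values): the sender-side shape of
 * a BINDER_TYPE_FDA object consumed above. The u32 fd array itself
 * lives inside the parent BINDER_TYPE_PTR buffer:
 *
 *	struct binder_fd_array_object fda = {
 *		.hdr.type	= BINDER_TYPE_FDA,
 *		.num_fds	= 3,
 *		.parent		= parent_index,
 *		.parent_offset	= 0,
 *	};
 *
 * parent_index is a made-up name for the offset-array index of the
 * parent buffer object; here the three u32 fds start at byte 0 of that
 * parent's data.
 */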
2950 static int binder_fixup_parent(struct list_head *pf_head,
2951 struct binder_transaction *t,
2952 struct binder_thread *thread,
2953 struct binder_buffer_object *bp,
2954 binder_size_t off_start_offset,
2955 binder_size_t num_valid,
2956 binder_size_t last_fixup_obj_off,
2957 binder_size_t last_fixup_min_off)
2959 struct binder_buffer_object *parent;
2960 struct binder_buffer *b = t->buffer;
2961 struct binder_proc *proc = thread->proc;
2962 struct binder_proc *target_proc = t->to_proc;
2963 struct binder_object object;
2964 binder_size_t buffer_offset;
2965 binder_size_t parent_offset;
2967 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2970 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2971 off_start_offset, &parent_offset,
2974 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2975 proc->pid, thread->pid);
2979 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2980 parent_offset, bp->parent_offset,
2982 last_fixup_min_off)) {
2983 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2984 proc->pid, thread->pid);
2988 if (parent->length < sizeof(binder_uintptr_t) ||
2989 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2990 /* No space for a pointer here! */
2991 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2992 proc->pid, thread->pid);
2995 buffer_offset = bp->parent_offset +
2996 (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2997 return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
3001 * binder_proc_transaction() - sends a transaction to a process and wakes it up
3002 * @t: transaction to send
3003 * @proc: process to send the transaction to
3004 * @thread: thread in @proc to send the transaction to (may be NULL)
3006 * This function queues a transaction to the specified process. It will try
3007 * to find a thread in the target process to handle the transaction and
3008 * wake it up. If no thread is found, the work is queued to the proc
3011 * If the @thread parameter is not NULL, the transaction is always queued
3012 * to the waitlist of that specific thread.
3014 * Return: true if the transaction was successfully queued
3015 * false if the target process or thread is dead
3017 static bool binder_proc_transaction(struct binder_transaction *t,
3018 struct binder_proc *proc,
3019 struct binder_thread *thread)
3021 struct binder_node *node = t->buffer->target_node;
3022 bool oneway = !!(t->flags & TF_ONE_WAY);
3023 bool pending_async = false;
3026 binder_node_lock(node);
3029 if (node->has_async_transaction) {
3030 pending_async = true;
3032 node->has_async_transaction = true;
3036 binder_inner_proc_lock(proc);
3038 if (proc->is_dead || (thread && thread->is_dead)) {
3039 binder_inner_proc_unlock(proc);
3040 binder_node_unlock(node);
3044 if (!thread && !pending_async)
3045 thread = binder_select_thread_ilocked(proc);
3048 binder_enqueue_thread_work_ilocked(thread, &t->work);
3049 else if (!pending_async)
3050 binder_enqueue_work_ilocked(&t->work, &proc->todo);
3052 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
3055 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
3057 binder_inner_proc_unlock(proc);
3058 binder_node_unlock(node);
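/*
 * Editor's summary of the queueing decision above:
 *
 *	sync txn,  @thread given    -> thread->todo, synchronous wakeup
 *	sync txn,  no @thread       -> a waiting thread, else proc->todo
 *	oneway,    node idle        -> as above, and the node is marked busy
 *	oneway,    async pending    -> node->async_todo, no wakeup
 */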
3064 * binder_get_node_refs_for_txn() - Get required refs on node for txn
3065 * @node: struct binder_node for which to get refs
3066 * @procp: returns @node->proc if valid
3067 * @error: set to BR_DEAD_REPLY when @node->proc is NULL
3069 * User-space normally keeps the node alive when creating a transaction
3070 * since it has a reference to the target. The local strong ref keeps it
3071 * alive if the sending process dies before the target process processes
3072 * the transaction. If the source process is malicious or has a reference
3073 * counting bug, relying on the local strong ref can fail.
3075 * Since user-space can cause the local strong ref to go away, we also take
3076 * a tmpref on the node to ensure it survives while we are constructing
3077 * the transaction. We also need a tmpref on the proc while we are
3078 * constructing the transaction, so we take that here as well.
3080 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
3081 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
3082 * target proc has died, @error is set to BR_DEAD_REPLY
3084 static struct binder_node *binder_get_node_refs_for_txn(
3085 struct binder_node *node,
3086 struct binder_proc **procp,
3089 struct binder_node *target_node = NULL;
3091 binder_node_inner_lock(node);
3094 binder_inc_node_nilocked(node, 1, 0, NULL);
3095 binder_inc_node_tmpref_ilocked(node);
3096 node->proc->tmp_ref++;
3097 *procp = node->proc;
3099 *error = BR_DEAD_REPLY;
3100 binder_node_inner_unlock(node);
3105 static void binder_transaction(struct binder_proc *proc,
3106 struct binder_thread *thread,
3107 struct binder_transaction_data *tr, int reply,
3108 binder_size_t extra_buffers_size)
3111 struct binder_transaction *t;
3112 struct binder_work *w;
3113 struct binder_work *tcomplete;
3114 binder_size_t buffer_offset = 0;
3115 binder_size_t off_start_offset, off_end_offset;
3116 binder_size_t off_min;
3117 binder_size_t sg_buf_offset, sg_buf_end_offset;
3118 binder_size_t user_offset = 0;
3119 struct binder_proc *target_proc = NULL;
3120 struct binder_thread *target_thread = NULL;
3121 struct binder_node *target_node = NULL;
3122 struct binder_transaction *in_reply_to = NULL;
3123 struct binder_transaction_log_entry *e;
3124 uint32_t return_error = 0;
3125 uint32_t return_error_param = 0;
3126 uint32_t return_error_line = 0;
3127 binder_size_t last_fixup_obj_off = 0;
3128 binder_size_t last_fixup_min_off = 0;
3129 struct binder_context *context = proc->context;
3130 int t_debug_id = atomic_inc_return(&binder_last_id);
3131 char *secctx = NULL;
3133 struct list_head sgc_head;
3134 struct list_head pf_head;
3135 const void __user *user_buffer = (const void __user *)
3136 (uintptr_t)tr->data.ptr.buffer;
3137 INIT_LIST_HEAD(&sgc_head);
3138 INIT_LIST_HEAD(&pf_head);
3140 e = binder_transaction_log_add(&binder_transaction_log);
3141 e->debug_id = t_debug_id;
3142 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
3143 e->from_proc = proc->pid;
3144 e->from_thread = thread->pid;
3145 e->target_handle = tr->target.handle;
3146 e->data_size = tr->data_size;
3147 e->offsets_size = tr->offsets_size;
3148 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
3151 binder_inner_proc_lock(proc);
3152 in_reply_to = thread->transaction_stack;
3153 if (in_reply_to == NULL) {
3154 binder_inner_proc_unlock(proc);
3155 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
3156 proc->pid, thread->pid);
3157 return_error = BR_FAILED_REPLY;
3158 return_error_param = -EPROTO;
3159 return_error_line = __LINE__;
3160 goto err_empty_call_stack;
3162 if (in_reply_to->to_thread != thread) {
3163 spin_lock(&in_reply_to->lock);
3164 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
3165 proc->pid, thread->pid, in_reply_to->debug_id,
3166 in_reply_to->to_proc ?
3167 in_reply_to->to_proc->pid : 0,
3168 in_reply_to->to_thread ?
3169 in_reply_to->to_thread->pid : 0);
3170 spin_unlock(&in_reply_to->lock);
3171 binder_inner_proc_unlock(proc);
3172 return_error = BR_FAILED_REPLY;
3173 return_error_param = -EPROTO;
3174 return_error_line = __LINE__;
3176 goto err_bad_call_stack;
3178 thread->transaction_stack = in_reply_to->to_parent;
3179 binder_inner_proc_unlock(proc);
3180 binder_set_nice(in_reply_to->saved_priority);
3181 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
3182 if (target_thread == NULL) {
3183 /* annotation for sparse */
3184 __release(&target_thread->proc->inner_lock);
3185 return_error = BR_DEAD_REPLY;
3186 return_error_line = __LINE__;
3187 goto err_dead_binder;
3189 if (target_thread->transaction_stack != in_reply_to) {
3190 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
3191 proc->pid, thread->pid,
3192 target_thread->transaction_stack ?
3193 target_thread->transaction_stack->debug_id : 0,
3194 in_reply_to->debug_id);
3195 binder_inner_proc_unlock(target_thread->proc);
3196 return_error = BR_FAILED_REPLY;
3197 return_error_param = -EPROTO;
3198 return_error_line = __LINE__;
3200 target_thread = NULL;
3201 goto err_dead_binder;
3203 target_proc = target_thread->proc;
3204 target_proc->tmp_ref++;
3205 binder_inner_proc_unlock(target_thread->proc);
3207 if (tr->target.handle) {
3208 struct binder_ref *ref;
3211 * There must already be a strong ref
3212 * on this node. If so, do a strong
3213 * increment on the node to ensure it
3214 * stays alive until the transaction is
3217 binder_proc_lock(proc);
3218 ref = binder_get_ref_olocked(proc, tr->target.handle,
3221 target_node = binder_get_node_refs_for_txn(
3222 ref->node, &target_proc,
3225 binder_user_error("%d:%d got transaction to invalid handle\n",
3226 proc->pid, thread->pid);
3227 return_error = BR_FAILED_REPLY;
3229 binder_proc_unlock(proc);
3231 mutex_lock(&context->context_mgr_node_lock);
3232 target_node = context->binder_context_mgr_node;
3234 target_node = binder_get_node_refs_for_txn(
3235 target_node, &target_proc,
3238 return_error = BR_DEAD_REPLY;
3239 mutex_unlock(&context->context_mgr_node_lock);
3240 if (target_node && target_proc->pid == proc->pid) {
3241 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3242 proc->pid, thread->pid);
3243 return_error = BR_FAILED_REPLY;
3244 return_error_param = -EINVAL;
3245 return_error_line = __LINE__;
3246 goto err_invalid_target_handle;
3251 * return_error is set above
3253 return_error_param = -EINVAL;
3254 return_error_line = __LINE__;
3255 goto err_dead_binder;
3257 e->to_node = target_node->debug_id;
3258 if (WARN_ON(proc == target_proc)) {
3259 return_error = BR_FAILED_REPLY;
3260 return_error_param = -EINVAL;
3261 return_error_line = __LINE__;
3262 goto err_invalid_target_handle;
3264 if (security_binder_transaction(proc->cred,
3265 target_proc->cred) < 0) {
3266 return_error = BR_FAILED_REPLY;
3267 return_error_param = -EPERM;
3268 return_error_line = __LINE__;
3269 goto err_invalid_target_handle;
3271 binder_inner_proc_lock(proc);
3273 w = list_first_entry_or_null(&thread->todo,
3274 struct binder_work, entry);
3275 if (!(tr->flags & TF_ONE_WAY) && w &&
3276 w->type == BINDER_WORK_TRANSACTION) {
3278 * Do not allow new outgoing transaction from a
3279 * thread that has a transaction at the head of
3280 * its todo list. Only need to check the head
3281 * because binder_select_thread_ilocked picks a
3282 * thread from proc->waiting_threads to enqueue
3283 * the transaction, and nothing is queued to the
3284 * todo list while the thread is on waiting_threads.
3286 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3287 proc->pid, thread->pid);
3288 binder_inner_proc_unlock(proc);
3289 return_error = BR_FAILED_REPLY;
3290 return_error_param = -EPROTO;
3291 return_error_line = __LINE__;
3292 goto err_bad_todo_list;
3295 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3296 struct binder_transaction *tmp;
3298 tmp = thread->transaction_stack;
3299 if (tmp->to_thread != thread) {
3300 spin_lock(&tmp->lock);
3301 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3302 proc->pid, thread->pid, tmp->debug_id,
3303 tmp->to_proc ? tmp->to_proc->pid : 0,
3305 tmp->to_thread->pid : 0);
3306 spin_unlock(&tmp->lock);
3307 binder_inner_proc_unlock(proc);
3308 return_error = BR_FAILED_REPLY;
3309 return_error_param = -EPROTO;
3310 return_error_line = __LINE__;
3311 goto err_bad_call_stack;
3314 struct binder_thread *from;
3316 spin_lock(&tmp->lock);
3318 if (from && from->proc == target_proc) {
3319 atomic_inc(&from->tmp_ref);
3320 target_thread = from;
3321 spin_unlock(&tmp->lock);
3324 spin_unlock(&tmp->lock);
3325 tmp = tmp->from_parent;
3328 binder_inner_proc_unlock(proc);
3331 e->to_thread = target_thread->pid;
3332 e->to_proc = target_proc->pid;
3334 /* TODO: reuse incoming transaction for reply */
3335 t = kzalloc(sizeof(*t), GFP_KERNEL);
3337 return_error = BR_FAILED_REPLY;
3338 return_error_param = -ENOMEM;
3339 return_error_line = __LINE__;
3340 goto err_alloc_t_failed;
3342 INIT_LIST_HEAD(&t->fd_fixups);
3343 binder_stats_created(BINDER_STAT_TRANSACTION);
3344 spin_lock_init(&t->lock);
3346 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3347 if (tcomplete == NULL) {
3348 return_error = BR_FAILED_REPLY;
3349 return_error_param = -ENOMEM;
3350 return_error_line = __LINE__;
3351 goto err_alloc_tcomplete_failed;
3353 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3355 t->debug_id = t_debug_id;
3358 binder_debug(BINDER_DEBUG_TRANSACTION,
3359 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3360 proc->pid, thread->pid, t->debug_id,
3361 target_proc->pid, target_thread->pid,
3362 (u64)tr->data.ptr.buffer,
3363 (u64)tr->data.ptr.offsets,
3364 (u64)tr->data_size, (u64)tr->offsets_size,
3365 (u64)extra_buffers_size);
3367 binder_debug(BINDER_DEBUG_TRANSACTION,
3368 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3369 proc->pid, thread->pid, t->debug_id,
3370 target_proc->pid, target_node->debug_id,
3371 (u64)tr->data.ptr.buffer,
3372 (u64)tr->data.ptr.offsets,
3373 (u64)tr->data_size, (u64)tr->offsets_size,
3374 (u64)extra_buffers_size);
3376 if (!reply && !(tr->flags & TF_ONE_WAY))
3380 t->sender_euid = task_euid(proc->tsk);
3381 t->to_proc = target_proc;
3382 t->to_thread = target_thread;
3384 t->flags = tr->flags;
3385 t->priority = task_nice(current);
3387 if (target_node && target_node->txn_security_ctx) {
3391 security_cred_getsecid(proc->cred, &secid);
3392 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3394 return_error = BR_FAILED_REPLY;
3395 return_error_param = ret;
3396 return_error_line = __LINE__;
3397 goto err_get_secctx_failed;
3399 added_size = ALIGN(secctx_sz, sizeof(u64));
3400 extra_buffers_size += added_size;
3401 if (extra_buffers_size < added_size) {
3402 /* integer overflow of extra_buffers_size */
3403 return_error = BR_FAILED_REPLY;
3404 return_error_param = -EINVAL;
3405 return_error_line = __LINE__;
3406 goto err_bad_extra_size;
3410 trace_binder_transaction(reply, t, target_node);
3412 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3413 tr->offsets_size, extra_buffers_size,
3414 !reply && (t->flags & TF_ONE_WAY));
3415 if (IS_ERR(t->buffer)) {
3417 * -ESRCH indicates VMA cleared. The target is dying.
3419 return_error_param = PTR_ERR(t->buffer);
3420 return_error = return_error_param == -ESRCH ?
3421 BR_DEAD_REPLY : BR_FAILED_REPLY;
3422 return_error_line = __LINE__;
3424 goto err_binder_alloc_buf_failed;
3428 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3429 ALIGN(tr->offsets_size, sizeof(void *)) +
3430 ALIGN(extra_buffers_size, sizeof(void *)) -
3431 ALIGN(secctx_sz, sizeof(u64));
3433 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3434 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3435 t->buffer, buf_offset,
3438 t->security_ctx = 0;
3441 security_release_secctx(secctx, secctx_sz);
3444 t->buffer->debug_id = t->debug_id;
3445 t->buffer->transaction = t;
3446 t->buffer->target_node = target_node;
3447 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3448 trace_binder_transaction_alloc_buf(t->buffer);
3450 if (binder_alloc_copy_user_to_buffer(
3451 &target_proc->alloc,
3453 ALIGN(tr->data_size, sizeof(void *)),
3454 (const void __user *)
3455 (uintptr_t)tr->data.ptr.offsets,
3456 tr->offsets_size)) {
3457 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3458 proc->pid, thread->pid);
3459 return_error = BR_FAILED_REPLY;
3460 return_error_param = -EFAULT;
3461 return_error_line = __LINE__;
3462 goto err_copy_data_failed;
3464 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3465 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3466 proc->pid, thread->pid, (u64)tr->offsets_size);
3467 return_error = BR_FAILED_REPLY;
3468 return_error_param = -EINVAL;
3469 return_error_line = __LINE__;
3470 goto err_bad_offset;
3472 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3473 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3474 proc->pid, thread->pid,
3475 (u64)extra_buffers_size);
3476 return_error = BR_FAILED_REPLY;
3477 return_error_param = -EINVAL;
3478 return_error_line = __LINE__;
3479 goto err_bad_offset;
3481 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3482 buffer_offset = off_start_offset;
3483 off_end_offset = off_start_offset + tr->offsets_size;
3484 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3485 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3486 ALIGN(secctx_sz, sizeof(u64));
3488 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3489 buffer_offset += sizeof(binder_size_t)) {
3490 struct binder_object_header *hdr;
3492 struct binder_object object;
3493 binder_size_t object_offset;
3494 binder_size_t copy_size;
3496 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3500 sizeof(object_offset))) {
3501 return_error = BR_FAILED_REPLY;
3502 return_error_param = -EINVAL;
3503 return_error_line = __LINE__;
3504 goto err_bad_offset;
3508 * Copy the source user buffer up to the next object
3509 * that will be processed.
3511 copy_size = object_offset - user_offset;
3512 if (copy_size && (user_offset > object_offset ||
3513 binder_alloc_copy_user_to_buffer(
3514 &target_proc->alloc,
3515 t->buffer, user_offset,
3516 user_buffer + user_offset,
3518 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3519 proc->pid, thread->pid);
3520 return_error = BR_FAILED_REPLY;
3521 return_error_param = -EFAULT;
3522 return_error_line = __LINE__;
3523 goto err_copy_data_failed;
3525 object_size = binder_get_object(target_proc, user_buffer,
3526 t->buffer, object_offset, &object);
3527 if (object_size == 0 || object_offset < off_min) {
3528 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3529 proc->pid, thread->pid,
3532 (u64)t->buffer->data_size);
3533 return_error = BR_FAILED_REPLY;
3534 return_error_param = -EINVAL;
3535 return_error_line = __LINE__;
3536 goto err_bad_offset;
3539 * Set offset to the next buffer fragment to be
3542 user_offset = object_offset + object_size;
3545 off_min = object_offset + object_size;
3546 switch (hdr->type) {
3547 case BINDER_TYPE_BINDER:
3548 case BINDER_TYPE_WEAK_BINDER: {
3549 struct flat_binder_object *fp;
3551 fp = to_flat_binder_object(hdr);
3552 ret = binder_translate_binder(fp, t, thread);
3555 binder_alloc_copy_to_buffer(&target_proc->alloc,
3559 return_error = BR_FAILED_REPLY;
3560 return_error_param = ret;
3561 return_error_line = __LINE__;
3562 goto err_translate_failed;
3565 case BINDER_TYPE_HANDLE:
3566 case BINDER_TYPE_WEAK_HANDLE: {
3567 struct flat_binder_object *fp;
3569 fp = to_flat_binder_object(hdr);
3570 ret = binder_translate_handle(fp, t, thread);
3572 binder_alloc_copy_to_buffer(&target_proc->alloc,
3576 return_error = BR_FAILED_REPLY;
3577 return_error_param = ret;
3578 return_error_line = __LINE__;
3579 goto err_translate_failed;
3583 case BINDER_TYPE_FD: {
3584 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3585 binder_size_t fd_offset = object_offset +
3586 (uintptr_t)&fp->fd - (uintptr_t)fp;
3587 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3588 thread, in_reply_to);
3592 binder_alloc_copy_to_buffer(&target_proc->alloc,
3596 return_error = BR_FAILED_REPLY;
3597 return_error_param = ret;
3598 return_error_line = __LINE__;
3599 goto err_translate_failed;
3602 case BINDER_TYPE_FDA: {
3603 struct binder_object ptr_object;
3604 binder_size_t parent_offset;
3605 struct binder_object user_object;
3606 size_t user_parent_size;
3607 struct binder_fd_array_object *fda =
3608 to_binder_fd_array_object(hdr);
3609 size_t num_valid = (buffer_offset - off_start_offset) /
3610 sizeof(binder_size_t);
3611 struct binder_buffer_object *parent =
3612 binder_validate_ptr(target_proc, t->buffer,
3613 &ptr_object, fda->parent,
3618 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3619 proc->pid, thread->pid);
3620 return_error = BR_FAILED_REPLY;
3621 return_error_param = -EINVAL;
3622 return_error_line = __LINE__;
3623 goto err_bad_parent;
3625 if (!binder_validate_fixup(target_proc, t->buffer,
3630 last_fixup_min_off)) {
3631 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3632 proc->pid, thread->pid);
3633 return_error = BR_FAILED_REPLY;
3634 return_error_param = -EINVAL;
3635 return_error_line = __LINE__;
3636 goto err_bad_parent;
3639 * We need to read the user version of the parent
3640 * object to get the original user offset
3643 binder_get_object(proc, user_buffer, t->buffer,
3644 parent_offset, &user_object);
3645 if (user_parent_size != sizeof(user_object.bbo)) {
3646 binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3647 proc->pid, thread->pid,
3649 sizeof(user_object.bbo));
3650 return_error = BR_FAILED_REPLY;
3651 return_error_param = -EINVAL;
3652 return_error_line = __LINE__;
3653 goto err_bad_parent;
3655 ret = binder_translate_fd_array(&pf_head, fda,
3656 user_buffer, parent,
3657 &user_object.bbo, t,
3658 thread, in_reply_to);
3660 ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3665 return_error = BR_FAILED_REPLY;
3666 return_error_param = ret > 0 ? -EINVAL : ret;
3667 return_error_line = __LINE__;
3668 goto err_translate_failed;
3670 last_fixup_obj_off = parent_offset;
3671 last_fixup_min_off =
3672 fda->parent_offset + sizeof(u32) * fda->num_fds;
3674 case BINDER_TYPE_PTR: {
3675 struct binder_buffer_object *bp =
3676 to_binder_buffer_object(hdr);
3677 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3680 if (bp->length > buf_left) {
3681 binder_user_error("%d:%d got transaction with too large buffer\n",
3682 proc->pid, thread->pid);
3683 return_error = BR_FAILED_REPLY;
3684 return_error_param = -EINVAL;
3685 return_error_line = __LINE__;
3686 goto err_bad_offset;
3688 ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3689 (const void __user *)(uintptr_t)bp->buffer,
3692 return_error = BR_FAILED_REPLY;
3693 return_error_param = ret;
3694 return_error_line = __LINE__;
3695 goto err_translate_failed;
3697 /* Fixup buffer pointer to target proc address space */
3698 bp->buffer = (uintptr_t)
3699 t->buffer->user_data + sg_buf_offset;
3700 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3702 num_valid = (buffer_offset - off_start_offset) /
3703 sizeof(binder_size_t);
3704 ret = binder_fixup_parent(&pf_head, t,
3709 last_fixup_min_off);
3711 binder_alloc_copy_to_buffer(&target_proc->alloc,
3715 return_error = BR_FAILED_REPLY;
3716 return_error_param = ret;
3717 return_error_line = __LINE__;
3718 goto err_translate_failed;
3720 last_fixup_obj_off = object_offset;
3721 last_fixup_min_off = 0;
3724 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3725 proc->pid, thread->pid, hdr->type);
3726 return_error = BR_FAILED_REPLY;
3727 return_error_param = -EINVAL;
3728 return_error_line = __LINE__;
3729 goto err_bad_object_type;
3732 /* Done processing objects, copy the rest of the buffer */
3733 if (binder_alloc_copy_user_to_buffer(
3734 &target_proc->alloc,
3735 t->buffer, user_offset,
3736 user_buffer + user_offset,
3737 tr->data_size - user_offset)) {
3738 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3739 proc->pid, thread->pid);
3740 return_error = BR_FAILED_REPLY;
3741 return_error_param = -EFAULT;
3742 return_error_line = __LINE__;
3743 goto err_copy_data_failed;
3746 ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3747 &sgc_head, &pf_head);
3749 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3750 proc->pid, thread->pid);
3751 return_error = BR_FAILED_REPLY;
3752 return_error_param = ret;
3753 return_error_line = __LINE__;
3754 goto err_copy_data_failed;
3756 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3757 t->work.type = BINDER_WORK_TRANSACTION;
3760 binder_enqueue_thread_work(thread, tcomplete);
3761 binder_inner_proc_lock(target_proc);
3762 if (target_thread->is_dead) {
3763 binder_inner_proc_unlock(target_proc);
3764 goto err_dead_proc_or_thread;
3766 BUG_ON(t->buffer->async_transaction != 0);
3767 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3768 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3769 binder_inner_proc_unlock(target_proc);
3770 wake_up_interruptible_sync(&target_thread->wait);
3771 binder_free_transaction(in_reply_to);
3772 } else if (!(t->flags & TF_ONE_WAY)) {
3773 BUG_ON(t->buffer->async_transaction != 0);
3774 binder_inner_proc_lock(proc);
3776 * Defer the TRANSACTION_COMPLETE, so we don't return to
3777 * userspace immediately; this allows the target process to
3778 * immediately start processing this transaction, reducing
3779 * latency. We will then return the TRANSACTION_COMPLETE when
3780 * the target replies (or there is an error).
3782 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3784 t->from_parent = thread->transaction_stack;
3785 thread->transaction_stack = t;
3786 binder_inner_proc_unlock(proc);
3787 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3788 binder_inner_proc_lock(proc);
3789 binder_pop_transaction_ilocked(thread, t);
3790 binder_inner_proc_unlock(proc);
3791 goto err_dead_proc_or_thread;
3794 BUG_ON(target_node == NULL);
3795 BUG_ON(t->buffer->async_transaction != 1);
3796 binder_enqueue_thread_work(thread, tcomplete);
3797 if (!binder_proc_transaction(t, target_proc, NULL))
3798 goto err_dead_proc_or_thread;
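/*
 * Editor's note: the three success paths above are (1) reply -- pop
 * in_reply_to and hand t straight to the waiting target thread,
 * (2) synchronous call -- push t on the caller's stack and defer
 * TRANSACTION_COMPLETE until the reply arrives, (3) oneway -- complete
 * immediately and let binder_proc_transaction() serialize the work per
 * node.
 */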
3801 binder_thread_dec_tmpref(target_thread);
3802 binder_proc_dec_tmpref(target_proc);
3804 binder_dec_node_tmpref(target_node);
3806 * write barrier to synchronize with initialization
3810 WRITE_ONCE(e->debug_id_done, t_debug_id);
3813 err_dead_proc_or_thread:
3814 return_error = BR_DEAD_REPLY;
3815 return_error_line = __LINE__;
3816 binder_dequeue_work(proc, tcomplete);
3817 err_translate_failed:
3818 err_bad_object_type:
3821 err_copy_data_failed:
3822 binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3823 binder_free_txn_fixups(t);
3824 trace_binder_transaction_failed_buffer_release(t->buffer);
3825 binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3826 buffer_offset, true);
3828 binder_dec_node_tmpref(target_node);
3830 t->buffer->transaction = NULL;
3831 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3832 err_binder_alloc_buf_failed:
3835 security_release_secctx(secctx, secctx_sz);
3836 err_get_secctx_failed:
3838 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3839 err_alloc_tcomplete_failed:
3841 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3845 err_empty_call_stack:
3847 err_invalid_target_handle:
3849 binder_thread_dec_tmpref(target_thread);
3851 binder_proc_dec_tmpref(target_proc);
3853 binder_dec_node(target_node, 1, 0);
3854 binder_dec_node_tmpref(target_node);
3857 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3858 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3859 proc->pid, thread->pid, return_error, return_error_param,
3860 (u64)tr->data_size, (u64)tr->offsets_size,
3864 struct binder_transaction_log_entry *fe;
3866 e->return_error = return_error;
3867 e->return_error_param = return_error_param;
3868 e->return_error_line = return_error_line;
3869 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3872 * write barrier to synchronize with initialization
3876 WRITE_ONCE(e->debug_id_done, t_debug_id);
3877 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3880 BUG_ON(thread->return_error.cmd != BR_OK);
3882 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3883 binder_enqueue_thread_work(thread, &thread->return_error.work);
3884 binder_send_failed_reply(in_reply_to, return_error);
3886 thread->return_error.cmd = return_error;
3887 binder_enqueue_thread_work(thread, &thread->return_error.work);
3892 * binder_free_buf() - free the specified buffer
3893 * @proc: binder proc that owns buffer
3894 * @buffer: buffer to be freed
3895 * @is_failure: true if the transaction failed to send
3897 * If the buffer is for an async transaction, enqueue the next async
3898 * transaction from the node.
3900 * Clean up the buffer and free it.
3903 binder_free_buf(struct binder_proc *proc,
3904 struct binder_thread *thread,
3905 struct binder_buffer *buffer, bool is_failure)
3907 binder_inner_proc_lock(proc);
3908 if (buffer->transaction) {
3909 buffer->transaction->buffer = NULL;
3910 buffer->transaction = NULL;
3912 binder_inner_proc_unlock(proc);
3913 if (buffer->async_transaction && buffer->target_node) {
3914 struct binder_node *buf_node;
3915 struct binder_work *w;
3917 buf_node = buffer->target_node;
3918 binder_node_inner_lock(buf_node);
3919 BUG_ON(!buf_node->has_async_transaction);
3920 BUG_ON(buf_node->proc != proc);
3921 w = binder_dequeue_work_head_ilocked(
3922 &buf_node->async_todo);
3924 buf_node->has_async_transaction = false;
3926 binder_enqueue_work_ilocked(
3928 binder_wakeup_proc_ilocked(proc);
3930 binder_node_inner_unlock(buf_node);
3932 trace_binder_transaction_buffer_release(buffer);
3933 binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
3934 binder_alloc_free_buf(&proc->alloc, buffer);
3937 static int binder_thread_write(struct binder_proc *proc,
3938 struct binder_thread *thread,
3939 binder_uintptr_t binder_buffer, size_t size,
3940 binder_size_t *consumed)
3943 struct binder_context *context = proc->context;
3944 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3945 void __user *ptr = buffer + *consumed;
3946 void __user *end = buffer + size;
3948 while (ptr < end && thread->return_error.cmd == BR_OK) {
3951 if (get_user(cmd, (uint32_t __user *)ptr))
3953 ptr += sizeof(uint32_t);
3954 trace_binder_command(cmd);
3955 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3956 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3957 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3958 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3966 const char *debug_string;
3967 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3968 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3969 struct binder_ref_data rdata;
3971 if (get_user(target, (uint32_t __user *)ptr))
3974 ptr += sizeof(uint32_t);
3976 if (increment && !target) {
3977 struct binder_node *ctx_mgr_node;
3978 mutex_lock(&context->context_mgr_node_lock);
3979 ctx_mgr_node = context->binder_context_mgr_node;
3981 if (ctx_mgr_node->proc == proc) {
3982 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3983 proc->pid, thread->pid);
3984 mutex_unlock(&context->context_mgr_node_lock);
3987 ret = binder_inc_ref_for_node(
3989 strong, NULL, &rdata);
3991 mutex_unlock(&context->context_mgr_node_lock);
3994 ret = binder_update_ref_for_handle(
3995 proc, target, increment, strong,
3997 if (!ret && rdata.desc != target) {
3998 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3999 proc->pid, thread->pid,
4000 target, rdata.desc);
4004 debug_string = "IncRefs";
4007 debug_string = "Acquire";
4010 debug_string = "Release";
4014 debug_string = "DecRefs";
4018 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
4019 proc->pid, thread->pid, debug_string,
4020 strong, target, ret);
4023 binder_debug(BINDER_DEBUG_USER_REFS,
4024 "%d:%d %s ref %d desc %d s %d w %d\n",
4025 proc->pid, thread->pid, debug_string,
4026 rdata.debug_id, rdata.desc, rdata.strong,
4030 case BC_INCREFS_DONE:
4031 case BC_ACQUIRE_DONE: {
4032 binder_uintptr_t node_ptr;
4033 binder_uintptr_t cookie;
4034 struct binder_node *node;
4037 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
4039 ptr += sizeof(binder_uintptr_t);
4040 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4042 ptr += sizeof(binder_uintptr_t);
4043 node = binder_get_node(proc, node_ptr);
4045 binder_user_error("%d:%d %s u%016llx no match\n",
4046 proc->pid, thread->pid,
4047 cmd == BC_INCREFS_DONE ?
4053 if (cookie != node->cookie) {
4054 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
4055 proc->pid, thread->pid,
4056 cmd == BC_INCREFS_DONE ?
4057 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4058 (u64)node_ptr, node->debug_id,
4059 (u64)cookie, (u64)node->cookie);
4060 binder_put_node(node);
4063 binder_node_inner_lock(node);
4064 if (cmd == BC_ACQUIRE_DONE) {
4065 if (node->pending_strong_ref == 0) {
4066 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
4067 proc->pid, thread->pid,
4069 binder_node_inner_unlock(node);
4070 binder_put_node(node);
4073 node->pending_strong_ref = 0;
4075 if (node->pending_weak_ref == 0) {
4076 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
4077 proc->pid, thread->pid,
4079 binder_node_inner_unlock(node);
4080 binder_put_node(node);
4083 node->pending_weak_ref = 0;
4085 free_node = binder_dec_node_nilocked(node,
4086 cmd == BC_ACQUIRE_DONE, 0);
4088 binder_debug(BINDER_DEBUG_USER_REFS,
4089 "%d:%d %s node %d ls %d lw %d tr %d\n",
4090 proc->pid, thread->pid,
4091 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4092 node->debug_id, node->local_strong_refs,
4093 node->local_weak_refs, node->tmp_refs);
4094 binder_node_inner_unlock(node);
4095 binder_put_node(node);
4096 break;
4097 }
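/*
 * Illustrative sketch (not part of the driver): BC_INCREFS_DONE and
 * BC_ACQUIRE_DONE are the second half of a handshake. The driver sends
 * BR_INCREFS/BR_ACQUIRE for a node (see BINDER_WORK_NODE in
 * binder_thread_read()), and user space acknowledges with the node's
 * ptr and cookie so the pending_{weak,strong}_ref flags cleared above
 * can be matched to that request. A hypothetical reply layout:
 */
#if 0	/* user-space example; "node_ptr"/"node_cookie" come from BR_ACQUIRE */
	struct {
		uint32_t cmd;
		binder_uintptr_t ptr;
		binder_uintptr_t cookie;
	} __attribute__((packed)) done = {
		.cmd = BC_ACQUIRE_DONE,
		.ptr = node_ptr,
		.cookie = node_cookie,
	};
	/* sent back in the next BINDER_WRITE_READ write buffer */
#endif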
4098 case BC_ATTEMPT_ACQUIRE:
4099 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
4101 case BC_ACQUIRE_RESULT:
4102 pr_err("BC_ACQUIRE_RESULT not supported\n");
4105 case BC_FREE_BUFFER: {
4106 binder_uintptr_t data_ptr;
4107 struct binder_buffer *buffer;
4109 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
4110 return -EFAULT;
4111 ptr += sizeof(binder_uintptr_t);
4113 buffer = binder_alloc_prepare_to_free(&proc->alloc,
4114 data_ptr);
4115 if (IS_ERR_OR_NULL(buffer)) {
4116 if (PTR_ERR(buffer) == -EPERM) {
4118 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
4119 proc->pid, thread->pid,
4123 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
4124 proc->pid, thread->pid,
4129 binder_debug(BINDER_DEBUG_FREE_BUFFER,
4130 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
4131 proc->pid, thread->pid, (u64)data_ptr,
4133 buffer->transaction ? "active" : "finished");
4134 binder_free_buf(proc, thread, buffer, false);
4135 break;
4136 }
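/*
 * Illustrative sketch (not part of the driver): once a received
 * transaction has been consumed, user space returns the kernel-mapped
 * buffer with BC_FREE_BUFFER, passing back the data.ptr.buffer address
 * it got in the BR_TRANSACTION payload. A hypothetical layout:
 */
#if 0	/* user-space example; "trd" is a received binder_transaction_data */
	struct {
		uint32_t cmd;
		binder_uintptr_t buffer;
	} __attribute__((packed)) free_cmd = {
		.cmd = BC_FREE_BUFFER,
		.buffer = trd.data.ptr.buffer,	/* address from BR_TRANSACTION */
	};
#endif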
4138 case BC_TRANSACTION_SG:
4139 case BC_REPLY_SG: {
4140 struct binder_transaction_data_sg tr;
4142 if (copy_from_user(&tr, ptr, sizeof(tr)))
4143 return -EFAULT;
4144 ptr += sizeof(tr);
4145 binder_transaction(proc, thread, &tr.transaction_data,
4146 cmd == BC_REPLY_SG, tr.buffers_size);
4147 break;
4148 }
4149 case BC_TRANSACTION:
4150 case BC_REPLY: {
4151 struct binder_transaction_data tr;
4153 if (copy_from_user(&tr, ptr, sizeof(tr)))
4154 return -EFAULT;
4155 ptr += sizeof(tr);
4156 binder_transaction(proc, thread, &tr,
4157 cmd == BC_REPLY, 0);
4158 break;
4159 }
4161 case BC_REGISTER_LOOPER:
4162 binder_debug(BINDER_DEBUG_THREADS,
4163 "%d:%d BC_REGISTER_LOOPER\n",
4164 proc->pid, thread->pid);
4165 binder_inner_proc_lock(proc);
4166 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4167 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4168 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4169 proc->pid, thread->pid);
4170 } else if (proc->requested_threads == 0) {
4171 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4172 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4173 proc->pid, thread->pid);
4174 } else {
4175 proc->requested_threads--;
4176 proc->requested_threads_started++;
4177 }
4178 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4179 binder_inner_proc_unlock(proc);
4180 break;
4181 case BC_ENTER_LOOPER:
4182 binder_debug(BINDER_DEBUG_THREADS,
4183 "%d:%d BC_ENTER_LOOPER\n",
4184 proc->pid, thread->pid);
4185 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4186 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4187 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4188 proc->pid, thread->pid);
4189 }
4190 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4191 break;
4192 case BC_EXIT_LOOPER:
4193 binder_debug(BINDER_DEBUG_THREADS,
4194 "%d:%d BC_EXIT_LOOPER\n",
4195 proc->pid, thread->pid);
4196 thread->looper |= BINDER_LOOPER_STATE_EXITED;
4197 break;
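/*
 * Illustrative note (not from the driver source): the three looper
 * commands above form the thread-pool lifecycle. A thread that enters
 * the read loop on its own initiative sends BC_ENTER_LOOPER; a thread
 * spawned in response to BR_SPAWN_LOOPER must send BC_REGISTER_LOOPER
 * so the requested_threads accounting stays balanced; BC_EXIT_LOOPER
 * marks it as leaving the pool. A hypothetical user-space ordering:
 */
#if 0	/* user-space example */
	uint32_t enter = BC_ENTER_LOOPER;	/* main thread, before reading */
	uint32_t reg = BC_REGISTER_LOOPER;	/* spawned thread, before reading */
	uint32_t exit_cmd = BC_EXIT_LOOPER;	/* on the way out */
#endif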
4199 case BC_REQUEST_DEATH_NOTIFICATION:
4200 case BC_CLEAR_DEATH_NOTIFICATION: {
4202 binder_uintptr_t cookie;
4203 struct binder_ref *ref;
4204 struct binder_ref_death *death = NULL;
4206 if (get_user(target, (uint32_t __user *)ptr))
4207 return -EFAULT;
4208 ptr += sizeof(uint32_t);
4209 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4210 return -EFAULT;
4211 ptr += sizeof(binder_uintptr_t);
4212 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4213 /*
4214 * Allocate memory for death notification
4215 * before taking lock
4216 */
4217 death = kzalloc(sizeof(*death), GFP_KERNEL);
4218 if (death == NULL) {
4219 WARN_ON(thread->return_error.cmd !=
4221 thread->return_error.cmd = BR_ERROR;
4222 binder_enqueue_thread_work(
4224 &thread->return_error.work);
4226 BINDER_DEBUG_FAILED_TRANSACTION,
4227 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4228 proc->pid, thread->pid);
4232 binder_proc_lock(proc);
4233 ref = binder_get_ref_olocked(proc, target, false);
4234 if (ref == NULL) {
4235 binder_user_error("%d:%d %s invalid ref %d\n",
4236 proc->pid, thread->pid,
4237 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4238 "BC_REQUEST_DEATH_NOTIFICATION" :
4239 "BC_CLEAR_DEATH_NOTIFICATION",
4241 binder_proc_unlock(proc);
4246 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4247 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4248 proc->pid, thread->pid,
4249 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4250 "BC_REQUEST_DEATH_NOTIFICATION" :
4251 "BC_CLEAR_DEATH_NOTIFICATION",
4252 (u64)cookie, ref->data.debug_id,
4253 ref->data.desc, ref->data.strong,
4254 ref->data.weak, ref->node->debug_id);
4256 binder_node_lock(ref->node);
4257 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4258 if (ref->death) {
4259 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4260 proc->pid, thread->pid);
4261 binder_node_unlock(ref->node);
4262 binder_proc_unlock(proc);
4266 binder_stats_created(BINDER_STAT_DEATH);
4267 INIT_LIST_HEAD(&death->work.entry);
4268 death->cookie = cookie;
4269 ref->death = death;
4270 if (ref->node->proc == NULL) {
4271 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4273 binder_inner_proc_lock(proc);
4274 binder_enqueue_work_ilocked(
4275 &ref->death->work, &proc->todo);
4276 binder_wakeup_proc_ilocked(proc);
4277 binder_inner_proc_unlock(proc);
4280 if (ref->death == NULL) {
4281 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4282 proc->pid, thread->pid);
4283 binder_node_unlock(ref->node);
4284 binder_proc_unlock(proc);
4285 break;
4286 }
4287 death = ref->death;
4288 if (death->cookie != cookie) {
4289 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4290 proc->pid, thread->pid,
4293 binder_node_unlock(ref->node);
4294 binder_proc_unlock(proc);
4295 break;
4296 }
4297 ref->death = NULL;
4298 binder_inner_proc_lock(proc);
4299 if (list_empty(&death->work.entry)) {
4300 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4301 if (thread->looper &
4302 (BINDER_LOOPER_STATE_REGISTERED |
4303 BINDER_LOOPER_STATE_ENTERED))
4304 binder_enqueue_thread_work_ilocked(
4308 binder_enqueue_work_ilocked(
4311 binder_wakeup_proc_ilocked(
4315 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4316 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4318 binder_inner_proc_unlock(proc);
4319 }
4320 binder_node_unlock(ref->node);
4321 binder_proc_unlock(proc);
4322 } break;
4323 case BC_DEAD_BINDER_DONE: {
4324 struct binder_work *w;
4325 binder_uintptr_t cookie;
4326 struct binder_ref_death *death = NULL;
4328 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4329 return -EFAULT;
4331 ptr += sizeof(cookie);
4332 binder_inner_proc_lock(proc);
4333 list_for_each_entry(w, &proc->delivered_death,
4335 struct binder_ref_death *tmp_death =
4337 struct binder_ref_death,
4340 if (tmp_death->cookie == cookie) {
4345 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4346 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4347 proc->pid, thread->pid, (u64)cookie,
4349 if (death == NULL) {
4350 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4351 proc->pid, thread->pid, (u64)cookie);
4352 binder_inner_proc_unlock(proc);
4353 break;
4354 }
4355 binder_dequeue_work_ilocked(&death->work);
4356 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4357 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4358 if (thread->looper &
4359 (BINDER_LOOPER_STATE_REGISTERED |
4360 BINDER_LOOPER_STATE_ENTERED))
4361 binder_enqueue_thread_work_ilocked(
4362 thread, &death->work);
4363 else {
4364 binder_enqueue_work_ilocked(
4365 &death->work,
4366 &proc->todo);
4367 binder_wakeup_proc_ilocked(proc);
4368 }
4369 }
4370 binder_inner_proc_unlock(proc);
4371 } break;
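/*
 * Illustrative note (not from the driver source): the three commands
 * above implement the death-notification protocol. User space sends
 * BC_REQUEST_DEATH_NOTIFICATION(handle, cookie); when the node's owner
 * dies, the driver queues BR_DEAD_BINDER with that cookie; user space
 * must answer with BC_DEAD_BINDER_DONE(cookie) so the work item can be
 * removed from proc->delivered_death. A hypothetical request layout:
 */
#if 0	/* user-space example; "handle" and "cookie" chosen by the caller */
	struct {
		uint32_t cmd;
		uint32_t handle;
		binder_uintptr_t cookie;
	} __attribute__((packed)) req = {
		.cmd = BC_REQUEST_DEATH_NOTIFICATION,
		.handle = handle,
		.cookie = cookie,	/* echoed back in BR_DEAD_BINDER */
	};
#endif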
4374 pr_err("%d:%d unknown command %d\n",
4375 proc->pid, thread->pid, cmd);
4378 *consumed = ptr - buffer;
4383 static void binder_stat_br(struct binder_proc *proc,
4384 struct binder_thread *thread, uint32_t cmd)
4386 trace_binder_return(cmd);
4387 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4388 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4389 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4390 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4394 static int binder_put_node_cmd(struct binder_proc *proc,
4395 struct binder_thread *thread,
4397 binder_uintptr_t node_ptr,
4398 binder_uintptr_t node_cookie,
4400 uint32_t cmd, const char *cmd_name)
4402 void __user *ptr = *ptrp;
4404 if (put_user(cmd, (uint32_t __user *)ptr))
4406 ptr += sizeof(uint32_t);
4408 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4410 ptr += sizeof(binder_uintptr_t);
4412 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4414 ptr += sizeof(binder_uintptr_t);
4416 binder_stat_br(proc, thread, cmd);
4417 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4418 proc->pid, thread->pid, cmd_name, node_debug_id,
4419 (u64)node_ptr, (u64)node_cookie);
4421 *ptrp = ptr;
4422 return 0;
4423 }
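/*
 * Illustrative sketch (not part of the driver): binder_put_node_cmd()
 * emits a u32 command followed by the node's ptr and cookie, so a
 * user-space reader consumes BR_INCREFS/BR_ACQUIRE/BR_RELEASE/BR_DECREFS
 * like this (hypothetical parser, no error handling):
 */
#if 0	/* user-space example; "p" walks the read buffer */
	uint32_t cmd = *(uint32_t *)p;
	p += sizeof(uint32_t);
	if (cmd == BR_INCREFS || cmd == BR_ACQUIRE ||
	    cmd == BR_RELEASE || cmd == BR_DECREFS) {
		binder_uintptr_t ptr = *(binder_uintptr_t *)p;
		p += sizeof(binder_uintptr_t);
		binder_uintptr_t cookie = *(binder_uintptr_t *)p;
		p += sizeof(binder_uintptr_t);
		/* adjust local refcounts; ack with BC_*_DONE if required */
	}
#endif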
4425 static int binder_wait_for_work(struct binder_thread *thread,
4426 bool do_proc_work)
4427 {
4428 DEFINE_WAIT(wait);
4429 struct binder_proc *proc = thread->proc;
4430 int ret = 0;
4432 freezer_do_not_count();
4433 binder_inner_proc_lock(proc);
4435 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4436 if (binder_has_work_ilocked(thread, do_proc_work))
4439 list_add(&thread->waiting_thread_node,
4440 &proc->waiting_threads);
4441 binder_inner_proc_unlock(proc);
4443 binder_inner_proc_lock(proc);
4444 list_del_init(&thread->waiting_thread_node);
4445 if (signal_pending(current)) {
4446 ret = -ERESTARTSYS;
4447 break;
4448 }
4449 }
4450 finish_wait(&thread->wait, &wait);
4451 binder_inner_proc_unlock(proc);
4452 freezer_count();
4454 return ret;
4455 }
4457 /**
4458 * binder_apply_fd_fixups() - finish fd translation
4459 * @proc: binder_proc associated with @t->buffer
4460 * @t: binder transaction with list of fd fixups
4462 * Now that we are in the context of the transaction target
4463 * process, we can allocate and install fds. Process the
4464 * list of fds to translate and fixup the buffer with the
4465 * new fds.
4467 * If we fail to allocate an fd, then free the resources by
4468 * fput'ing files that have not been processed and ksys_close'ing
4469 * any fds that have already been allocated.
4470 */
4471 static int binder_apply_fd_fixups(struct binder_proc *proc,
4472 struct binder_transaction *t)
4474 struct binder_txn_fd_fixup *fixup, *tmp;
4475 int ret = 0;
4477 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4478 int fd = get_unused_fd_flags(O_CLOEXEC);
4481 binder_debug(BINDER_DEBUG_TRANSACTION,
4482 "failed fd fixup txn %d fd %d\n",
4487 binder_debug(BINDER_DEBUG_TRANSACTION,
4488 "fd fixup txn %d fd %d\n",
4490 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4491 fd_install(fd, fixup->file);
4492 fixup->file = NULL;
4493 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4500 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4507 err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
4513 binder_deferred_fd_close(fd);
4515 list_del(&fixup->fixup_entry);
4516 kfree(fixup);
4517 }
4519 return ret;
4520 }
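/*
 * Illustrative note (not from the driver source): the two loops above
 * are deliberately split. The first loop reserves an fd, publishes it
 * with fd_install(), and clears fixup->file on success; the second loop
 * is cleanup, so any fixup whose ->file is still set was never
 * installed and is fput(), while on error the fds already written into
 * the buffer are read back and closed via binder_deferred_fd_close().
 * A minimal sketch of the same reserve-then-install idiom:
 */
#if 0	/* kernel-style sketch, assuming a valid struct file *file */
	int fd = get_unused_fd_flags(O_CLOEXEC);	/* reserve a number */

	if (fd >= 0)
		fd_install(fd, file);	/* fd is live from this point on */
	else
		fput(file);		/* never installed, drop our ref */
#endif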
4522 static int binder_thread_read(struct binder_proc *proc,
4523 struct binder_thread *thread,
4524 binder_uintptr_t binder_buffer, size_t size,
4525 binder_size_t *consumed, int non_block)
4527 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4528 void __user *ptr = buffer + *consumed;
4529 void __user *end = buffer + size;
4532 int wait_for_proc_work;
4534 if (*consumed == 0) {
4535 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4536 return -EFAULT;
4537 ptr += sizeof(uint32_t);
4538 }
4540 retry:
4541 binder_inner_proc_lock(proc);
4542 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4543 binder_inner_proc_unlock(proc);
4545 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4547 trace_binder_wait_for_work(wait_for_proc_work,
4548 !!thread->transaction_stack,
4549 !binder_worklist_empty(proc, &thread->todo));
4550 if (wait_for_proc_work) {
4551 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4552 BINDER_LOOPER_STATE_ENTERED))) {
4553 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4554 proc->pid, thread->pid, thread->looper);
4555 wait_event_interruptible(binder_user_error_wait,
4556 binder_stop_on_user_error < 2);
4558 binder_set_nice(proc->default_priority);
4559 }
4561 if (non_block) {
4562 if (!binder_has_work(thread, wait_for_proc_work))
4563 ret = -EAGAIN;
4564 } else {
4565 ret = binder_wait_for_work(thread, wait_for_proc_work);
4566 }
4568 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4570 if (ret)
4571 return ret;
4573 while (1) {
4574 uint32_t cmd;
4575 struct binder_transaction_data_secctx tr;
4576 struct binder_transaction_data *trd = &tr.transaction_data;
4577 struct binder_work *w = NULL;
4578 struct list_head *list = NULL;
4579 struct binder_transaction *t = NULL;
4580 struct binder_thread *t_from;
4581 size_t trsize = sizeof(*trd);
4583 binder_inner_proc_lock(proc);
4584 if (!binder_worklist_empty_ilocked(&thread->todo))
4585 list = &thread->todo;
4586 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4590 binder_inner_proc_unlock(proc);
4593 if (ptr - buffer == 4 && !thread->looper_need_return)
4598 if (end - ptr < sizeof(tr) + 4) {
4599 binder_inner_proc_unlock(proc);
4602 w = binder_dequeue_work_head_ilocked(list);
4603 if (binder_worklist_empty_ilocked(&thread->todo))
4604 thread->process_todo = false;
4606 switch (w->type) {
4607 case BINDER_WORK_TRANSACTION: {
4608 binder_inner_proc_unlock(proc);
4609 t = container_of(w, struct binder_transaction, work);
4611 case BINDER_WORK_RETURN_ERROR: {
4612 struct binder_error *e = container_of(
4613 w, struct binder_error, work);
4615 WARN_ON(e->cmd == BR_OK);
4616 binder_inner_proc_unlock(proc);
4617 if (put_user(e->cmd, (uint32_t __user *)ptr))
4618 return -EFAULT;
4619 cmd = e->cmd;
4620 e->cmd = BR_OK;
4621 ptr += sizeof(uint32_t);
4623 binder_stat_br(proc, thread, cmd);
4625 case BINDER_WORK_TRANSACTION_COMPLETE: {
4626 binder_inner_proc_unlock(proc);
4627 cmd = BR_TRANSACTION_COMPLETE;
4628 kfree(w);
4629 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4630 if (put_user(cmd, (uint32_t __user *)ptr))
4631 return -EFAULT;
4632 ptr += sizeof(uint32_t);
4634 binder_stat_br(proc, thread, cmd);
4635 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4636 "%d:%d BR_TRANSACTION_COMPLETE\n",
4637 proc->pid, thread->pid);
4639 case BINDER_WORK_NODE: {
4640 struct binder_node *node = container_of(w, struct binder_node, work);
4641 int strong, weak;
4642 binder_uintptr_t node_ptr = node->ptr;
4643 binder_uintptr_t node_cookie = node->cookie;
4644 int node_debug_id = node->debug_id;
4645 int has_weak_ref;
4646 int has_strong_ref;
4647 void __user *orig_ptr = ptr;
4649 BUG_ON(proc != node->proc);
4650 strong = node->internal_strong_refs ||
4651 node->local_strong_refs;
4652 weak = !hlist_empty(&node->refs) ||
4653 node->local_weak_refs ||
4654 node->tmp_refs || strong;
4655 has_strong_ref = node->has_strong_ref;
4656 has_weak_ref = node->has_weak_ref;
4658 if (weak && !has_weak_ref) {
4659 node->has_weak_ref = 1;
4660 node->pending_weak_ref = 1;
4661 node->local_weak_refs++;
4663 if (strong && !has_strong_ref) {
4664 node->has_strong_ref = 1;
4665 node->pending_strong_ref = 1;
4666 node->local_strong_refs++;
4668 if (!strong && has_strong_ref)
4669 node->has_strong_ref = 0;
4670 if (!weak && has_weak_ref)
4671 node->has_weak_ref = 0;
4672 if (!weak && !strong) {
4673 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4674 "%d:%d node %d u%016llx c%016llx deleted\n",
4675 proc->pid, thread->pid,
4679 rb_erase(&node->rb_node, &proc->nodes);
4680 binder_inner_proc_unlock(proc);
4681 binder_node_lock(node);
4682 /*
4683 * Acquire the node lock before freeing the
4684 * node to serialize with other threads that
4685 * may have been holding the node lock while
4686 * decrementing this node (avoids race where
4687 * this thread frees while the other thread
4688 * is unlocking the node after the final
4689 * dec)
4690 */
4691 binder_node_unlock(node);
4692 binder_free_node(node);
4693 } else
4694 binder_inner_proc_unlock(proc);
4696 if (weak && !has_weak_ref)
4697 ret = binder_put_node_cmd(
4698 proc, thread, &ptr, node_ptr,
4699 node_cookie, node_debug_id,
4700 BR_INCREFS, "BR_INCREFS");
4701 if (!ret && strong && !has_strong_ref)
4702 ret = binder_put_node_cmd(
4703 proc, thread, &ptr, node_ptr,
4704 node_cookie, node_debug_id,
4705 BR_ACQUIRE, "BR_ACQUIRE");
4706 if (!ret && !strong && has_strong_ref)
4707 ret = binder_put_node_cmd(
4708 proc, thread, &ptr, node_ptr,
4709 node_cookie, node_debug_id,
4710 BR_RELEASE, "BR_RELEASE");
4711 if (!ret && !weak && has_weak_ref)
4712 ret = binder_put_node_cmd(
4713 proc, thread, &ptr, node_ptr,
4714 node_cookie, node_debug_id,
4715 BR_DECREFS, "BR_DECREFS");
4716 if (orig_ptr == ptr)
4717 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4718 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4719 proc->pid, thread->pid,
4726 case BINDER_WORK_DEAD_BINDER:
4727 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4728 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4729 struct binder_ref_death *death;
4731 binder_uintptr_t cookie;
4733 death = container_of(w, struct binder_ref_death, work);
4734 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4735 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4737 cmd = BR_DEAD_BINDER;
4738 cookie = death->cookie;
4740 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4741 "%d:%d %s %016llx\n",
4742 proc->pid, thread->pid,
4743 cmd == BR_DEAD_BINDER ?
4745 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4747 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4748 binder_inner_proc_unlock(proc);
4750 binder_stats_deleted(BINDER_STAT_DEATH);
4752 binder_enqueue_work_ilocked(
4753 w, &proc->delivered_death);
4754 binder_inner_proc_unlock(proc);
4756 if (put_user(cmd, (uint32_t __user *)ptr))
4758 ptr += sizeof(uint32_t);
4759 if (put_user(cookie,
4760 (binder_uintptr_t __user *)ptr))
4762 ptr += sizeof(binder_uintptr_t);
4763 binder_stat_br(proc, thread, cmd);
4764 if (cmd == BR_DEAD_BINDER)
4765 goto done; /* DEAD_BINDER notifications can cause transactions */
4768 binder_inner_proc_unlock(proc);
4769 pr_err("%d:%d: bad work type %d\n",
4770 proc->pid, thread->pid, w->type);
4771 break;
4772 }
4774 if (!t)
4775 continue;
4777 BUG_ON(t->buffer == NULL);
4778 if (t->buffer->target_node) {
4779 struct binder_node *target_node = t->buffer->target_node;
4781 trd->target.ptr = target_node->ptr;
4782 trd->cookie = target_node->cookie;
4783 t->saved_priority = task_nice(current);
4784 if (t->priority < target_node->min_priority &&
4785 !(t->flags & TF_ONE_WAY))
4786 binder_set_nice(t->priority);
4787 else if (!(t->flags & TF_ONE_WAY) ||
4788 t->saved_priority > target_node->min_priority)
4789 binder_set_nice(target_node->min_priority);
4790 cmd = BR_TRANSACTION;
4791 } else {
4792 trd->target.ptr = 0;
4793 trd->cookie = 0;
4794 cmd = BR_REPLY;
4795 }
4796 trd->code = t->code;
4797 trd->flags = t->flags;
4798 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4800 t_from = binder_get_txn_from(t);
4801 if (t_from) {
4802 struct task_struct *sender = t_from->proc->tsk;
4804 trd->sender_pid =
4805 task_tgid_nr_ns(sender,
4806 task_active_pid_ns(current));
4807 } else {
4808 trd->sender_pid = 0;
4809 }
4811 ret = binder_apply_fd_fixups(proc, t);
4812 if (ret) {
4813 struct binder_buffer *buffer = t->buffer;
4814 bool oneway = !!(t->flags & TF_ONE_WAY);
4815 int tid = t->debug_id;
4818 binder_thread_dec_tmpref(t_from);
4819 buffer->transaction = NULL;
4820 binder_cleanup_transaction(t, "fd fixups failed",
4822 binder_free_buf(proc, thread, buffer, true);
4823 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4824 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4825 proc->pid, thread->pid,
4827 (cmd == BR_REPLY ? "reply " : ""),
4828 tid, BR_FAILED_REPLY, ret, __LINE__);
4829 if (cmd == BR_REPLY) {
4830 cmd = BR_FAILED_REPLY;
4831 if (put_user(cmd, (uint32_t __user *)ptr))
4832 return -EFAULT;
4833 ptr += sizeof(uint32_t);
4834 binder_stat_br(proc, thread, cmd);
4835 }
4836 continue;
4837 }
4839 trd->data_size = t->buffer->data_size;
4840 trd->offsets_size = t->buffer->offsets_size;
4841 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4842 trd->data.ptr.offsets = trd->data.ptr.buffer +
4843 ALIGN(t->buffer->data_size,
4846 tr.secctx = t->security_ctx;
4847 if (t->security_ctx) {
4848 cmd = BR_TRANSACTION_SEC_CTX;
4849 trsize = sizeof(tr);
4851 if (put_user(cmd, (uint32_t __user *)ptr)) {
4852 if (t_from)
4853 binder_thread_dec_tmpref(t_from);
4855 binder_cleanup_transaction(t, "put_user failed",
4856 BR_FAILED_REPLY);
4858 return -EFAULT;
4859 }
4860 ptr += sizeof(uint32_t);
4861 if (copy_to_user(ptr, &tr, trsize)) {
4862 if (t_from)
4863 binder_thread_dec_tmpref(t_from);
4865 binder_cleanup_transaction(t, "copy_to_user failed",
4866 BR_FAILED_REPLY);
4868 return -EFAULT;
4869 }
4870 ptr += trsize;
4872 trace_binder_transaction_received(t);
4873 binder_stat_br(proc, thread, cmd);
4874 binder_debug(BINDER_DEBUG_TRANSACTION,
4875 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4876 proc->pid, thread->pid,
4877 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4878 (cmd == BR_TRANSACTION_SEC_CTX) ?
4879 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4880 t->debug_id, t_from ? t_from->proc->pid : 0,
4881 t_from ? t_from->pid : 0, cmd,
4882 t->buffer->data_size, t->buffer->offsets_size,
4883 (u64)trd->data.ptr.buffer,
4884 (u64)trd->data.ptr.offsets);
4886 if (t_from)
4887 binder_thread_dec_tmpref(t_from);
4888 t->buffer->allow_user_free = 1;
4889 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4890 binder_inner_proc_lock(thread->proc);
4891 t->to_parent = thread->transaction_stack;
4892 t->to_thread = thread;
4893 thread->transaction_stack = t;
4894 binder_inner_proc_unlock(thread->proc);
4895 } else {
4896 binder_free_transaction(t);
4897 }
4898 break;
4899 }
4901 done:
4903 *consumed = ptr - buffer;
4904 binder_inner_proc_lock(proc);
4905 if (proc->requested_threads == 0 &&
4906 list_empty(&thread->proc->waiting_threads) &&
4907 proc->requested_threads_started < proc->max_threads &&
4908 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4909 BINDER_LOOPER_STATE_ENTERED))
4910 /* the user-space code fails to spawn a new thread if we leave this out */) {
4911 proc->requested_threads++;
4912 binder_inner_proc_unlock(proc);
4913 binder_debug(BINDER_DEBUG_THREADS,
4914 "%d:%d BR_SPAWN_LOOPER\n",
4915 proc->pid, thread->pid);
4916 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4917 return -EFAULT;
4918 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4919 } else
4920 binder_inner_proc_unlock(proc);
4921 return 0;
4922 }
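/*
 * Illustrative note (not from the driver source): BR_SPAWN_LOOPER is
 * written at the very start of the read buffer, before any other
 * returns, and requested_threads is bumped so only one spawn request
 * is outstanding at a time. User space reacts by starting a thread
 * that registers itself; a hypothetical handler in the read loop:
 */
#if 0	/* user-space example; spawn_pool_thread() is hypothetical */
	case BR_SPAWN_LOOPER:
		/* new thread must write BC_REGISTER_LOOPER, then read */
		spawn_pool_thread();
		break;
#endif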
4924 static void binder_release_work(struct binder_proc *proc,
4925 struct list_head *list)
4927 struct binder_work *w;
4928 enum binder_work_type wtype;
4931 binder_inner_proc_lock(proc);
4932 w = binder_dequeue_work_head_ilocked(list);
4933 wtype = w ? w->type : 0;
4934 binder_inner_proc_unlock(proc);
4935 if (!w)
4936 return;
4938 switch (wtype) {
4939 case BINDER_WORK_TRANSACTION: {
4940 struct binder_transaction *t;
4942 t = container_of(w, struct binder_transaction, work);
4944 binder_cleanup_transaction(t, "process died.",
4947 case BINDER_WORK_RETURN_ERROR: {
4948 struct binder_error *e = container_of(
4949 w, struct binder_error, work);
4951 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4952 "undelivered TRANSACTION_ERROR: %u\n",
4955 case BINDER_WORK_TRANSACTION_COMPLETE: {
4956 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4957 "undelivered TRANSACTION_COMPLETE\n");
4959 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4961 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4962 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4963 struct binder_ref_death *death;
4965 death = container_of(w, struct binder_ref_death, work);
4966 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4967 "undelivered death notification, %016llx\n",
4968 (u64)death->cookie);
4969 kfree(death);
4970 binder_stats_deleted(BINDER_STAT_DEATH);
4971 } break;
4972 case BINDER_WORK_NODE:
4973 break;
4974 default:
4975 pr_err("unexpected work type, %d, not freed\n",
4976 wtype);
4977 break;
4978 }
4979 }
4980 }
4983 static struct binder_thread *binder_get_thread_ilocked(
4984 struct binder_proc *proc, struct binder_thread *new_thread)
4986 struct binder_thread *thread = NULL;
4987 struct rb_node *parent = NULL;
4988 struct rb_node **p = &proc->threads.rb_node;
4990 while (*p) {
4991 parent = *p;
4992 thread = rb_entry(parent, struct binder_thread, rb_node);
4994 if (current->pid < thread->pid)
4995 p = &(*p)->rb_left;
4996 else if (current->pid > thread->pid)
4997 p = &(*p)->rb_right;
4998 else
4999 return thread;
5000 }
5001 if (!new_thread)
5002 return NULL;
5003 thread = new_thread;
5004 binder_stats_created(BINDER_STAT_THREAD);
5005 thread->proc = proc;
5006 thread->pid = current->pid;
5007 atomic_set(&thread->tmp_ref, 0);
5008 init_waitqueue_head(&thread->wait);
5009 INIT_LIST_HEAD(&thread->todo);
5010 rb_link_node(&thread->rb_node, parent, p);
5011 rb_insert_color(&thread->rb_node, &proc->threads);
5012 thread->looper_need_return = true;
5013 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
5014 thread->return_error.cmd = BR_OK;
5015 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
5016 thread->reply_error.cmd = BR_OK;
5017 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
5018 return thread;
5019 }
5021 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
5023 struct binder_thread *thread;
5024 struct binder_thread *new_thread;
5026 binder_inner_proc_lock(proc);
5027 thread = binder_get_thread_ilocked(proc, NULL);
5028 binder_inner_proc_unlock(proc);
5029 if (!thread) {
5030 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
5031 if (new_thread == NULL)
5032 return NULL;
5033 binder_inner_proc_lock(proc);
5034 thread = binder_get_thread_ilocked(proc, new_thread);
5035 binder_inner_proc_unlock(proc);
5036 if (thread != new_thread)
5037 kfree(new_thread);
5038 }
5039 return thread;
5040 }
5042 static void binder_free_proc(struct binder_proc *proc)
5044 struct binder_device *device;
5046 BUG_ON(!list_empty(&proc->todo));
5047 BUG_ON(!list_empty(&proc->delivered_death));
5048 device = container_of(proc->context, struct binder_device, context);
5049 if (refcount_dec_and_test(&device->ref)) {
5050 kfree(proc->context->name);
5051 kfree(device);
5052 }
5053 binder_alloc_deferred_release(&proc->alloc);
5054 put_task_struct(proc->tsk);
5055 put_cred(proc->cred);
5056 binder_stats_deleted(BINDER_STAT_PROC);
5057 kfree(proc);
5058 }
5060 static void binder_free_thread(struct binder_thread *thread)
5062 BUG_ON(!list_empty(&thread->todo));
5063 binder_stats_deleted(BINDER_STAT_THREAD);
5064 binder_proc_dec_tmpref(thread->proc);
5065 kfree(thread);
5066 }
5068 static int binder_thread_release(struct binder_proc *proc,
5069 struct binder_thread *thread)
5071 struct binder_transaction *t;
5072 struct binder_transaction *send_reply = NULL;
5073 int active_transactions = 0;
5074 struct binder_transaction *last_t = NULL;
5076 binder_inner_proc_lock(thread->proc);
5077 /*
5078 * take a ref on the proc so it survives
5079 * after we remove this thread from proc->threads.
5080 * The corresponding dec is when we actually
5081 * free the thread in binder_free_thread()
5082 */
5083 binder_proc_inc_tmpref(proc);
5084 /*
5085 * take a ref on this thread to ensure it
5086 * survives while we are releasing it
5087 */
5088 atomic_inc(&thread->tmp_ref);
5089 rb_erase(&thread->rb_node, &proc->threads);
5090 t = thread->transaction_stack;
5091 if (t) {
5092 spin_lock(&t->lock);
5093 if (t->to_thread == thread)
5094 send_reply = t;
5095 } else {
5096 __acquire(&t->lock);
5097 }
5098 thread->is_dead = true;
5100 while (t) {
5101 last_t = t;
5102 active_transactions++;
5103 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5104 "release %d:%d transaction %d %s, still active\n",
5105 proc->pid, thread->pid,
5107 (t->to_thread == thread) ? "in" : "out");
5109 if (t->to_thread == thread) {
5111 t->to_thread = NULL;
5113 t->buffer->transaction = NULL;
5117 } else if (t->from == thread) {
5122 spin_unlock(&last_t->lock);
5124 spin_lock(&t->lock);
5126 __acquire(&t->lock);
5128 /* annotation for sparse, lock not acquired in last iteration above */
5129 __release(&t->lock);
5131 /*
5132 * If this thread used poll, make sure we remove the waitqueue from any
5133 * poll data structures holding it.
5134 */
5135 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5136 wake_up_pollfree(&thread->wait);
5138 binder_inner_proc_unlock(thread->proc);
5140 /*
5141 * This is needed to avoid races between wake_up_pollfree() above and
5142 * someone else removing the last entry from the queue for other reasons
5143 * (e.g. ep_remove_wait_queue() being called due to an epoll file
5144 * descriptor being closed). Such other users hold an RCU read lock, so
5145 * we can be sure they're done after we call synchronize_rcu().
5146 */
5147 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5148 synchronize_rcu();
5150 if (send_reply)
5151 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5152 binder_release_work(proc, &thread->todo);
5153 binder_thread_dec_tmpref(thread);
5154 return active_transactions;
5155 }
5157 static __poll_t binder_poll(struct file *filp,
5158 struct poll_table_struct *wait)
5160 struct binder_proc *proc = filp->private_data;
5161 struct binder_thread *thread = NULL;
5162 bool wait_for_proc_work;
5164 thread = binder_get_thread(proc);
5165 if (!thread)
5166 return EPOLLERR;
5168 binder_inner_proc_lock(thread->proc);
5169 thread->looper |= BINDER_LOOPER_STATE_POLL;
5170 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5172 binder_inner_proc_unlock(thread->proc);
5174 poll_wait(filp, &thread->wait, wait);
5176 if (binder_has_work(thread, wait_for_proc_work))
5177 return EPOLLIN;
5179 return 0;
5180 }
5182 static int binder_ioctl_write_read(struct file *filp,
5183 unsigned int cmd, unsigned long arg,
5184 struct binder_thread *thread)
5187 struct binder_proc *proc = filp->private_data;
5188 unsigned int size = _IOC_SIZE(cmd);
5189 void __user *ubuf = (void __user *)arg;
5190 struct binder_write_read bwr;
5192 if (size != sizeof(struct binder_write_read)) {
5196 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
5200 binder_debug(BINDER_DEBUG_READ_WRITE,
5201 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5202 proc->pid, thread->pid,
5203 (u64)bwr.write_size, (u64)bwr.write_buffer,
5204 (u64)bwr.read_size, (u64)bwr.read_buffer);
5206 if (bwr.write_size > 0) {
5207 ret = binder_thread_write(proc, thread,
5210 &bwr.write_consumed);
5211 trace_binder_write_done(ret);
5213 bwr.read_consumed = 0;
5214 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5219 if (bwr.read_size > 0) {
5220 ret = binder_thread_read(proc, thread, bwr.read_buffer,
5223 filp->f_flags & O_NONBLOCK);
5224 trace_binder_read_done(ret);
5225 binder_inner_proc_lock(proc);
5226 if (!binder_worklist_empty_ilocked(&proc->todo))
5227 binder_wakeup_proc_ilocked(proc);
5228 binder_inner_proc_unlock(proc);
5230 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5235 binder_debug(BINDER_DEBUG_READ_WRITE,
5236 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5237 proc->pid, thread->pid,
5238 (u64)bwr.write_consumed, (u64)bwr.write_size,
5239 (u64)bwr.read_consumed, (u64)bwr.read_size);
5240 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
5241 ret = -EFAULT;
5242 goto out;
5243 }
5244 out:
5245 return ret;
5246 }
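/*
 * Illustrative sketch (not part of the driver): a full
 * BINDER_WRITE_READ round trip from user space. The kernel updates
 * write_consumed/read_consumed in place, which is why the struct is
 * copied back to user space on every exit path above. "binder_fd" is
 * a hypothetical open binder fd; error handling is omitted.
 */
#if 0	/* user-space example */
	char rbuf[256];
	uint32_t wbuf[2] = { BC_INCREFS, 1 };	/* any command stream */
	struct binder_write_read bwr = {
		.write_buffer = (binder_uintptr_t)wbuf,
		.write_size = sizeof(wbuf),
		.read_buffer = (binder_uintptr_t)rbuf,
		.read_size = sizeof(rbuf),
	};

	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
	/* bwr.read_consumed bytes of BR_* returns are now in rbuf */
#endif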
5248 static int binder_ioctl_set_ctx_mgr(struct file *filp,
5249 struct flat_binder_object *fbo)
5252 struct binder_proc *proc = filp->private_data;
5253 struct binder_context *context = proc->context;
5254 struct binder_node *new_node;
5255 kuid_t curr_euid = current_euid();
5257 mutex_lock(&context->context_mgr_node_lock);
5258 if (context->binder_context_mgr_node) {
5259 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5263 ret = security_binder_set_context_mgr(proc->cred);
5266 if (uid_valid(context->binder_context_mgr_uid)) {
5267 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5268 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5269 from_kuid(&init_user_ns, curr_euid),
5270 from_kuid(&init_user_ns,
5271 context->binder_context_mgr_uid));
5272 ret = -EPERM;
5273 goto out;
5274 }
5275 } else {
5276 context->binder_context_mgr_uid = curr_euid;
5277 }
5278 new_node = binder_new_node(proc, fbo);
5279 if (!new_node) {
5280 ret = -ENOMEM;
5281 goto out;
5282 }
5283 binder_node_lock(new_node);
5284 new_node->local_weak_refs++;
5285 new_node->local_strong_refs++;
5286 new_node->has_strong_ref = 1;
5287 new_node->has_weak_ref = 1;
5288 context->binder_context_mgr_node = new_node;
5289 binder_node_unlock(new_node);
5290 binder_put_node(new_node);
5291 out:
5292 mutex_unlock(&context->context_mgr_node_lock);
5293 return ret;
5294 }
5296 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5297 struct binder_node_info_for_ref *info)
5299 struct binder_node *node;
5300 struct binder_context *context = proc->context;
5301 __u32 handle = info->handle;
5303 if (info->strong_count || info->weak_count || info->reserved1 ||
5304 info->reserved2 || info->reserved3) {
5305 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5310 /* This ioctl may only be used by the context manager */
5311 mutex_lock(&context->context_mgr_node_lock);
5312 if (!context->binder_context_mgr_node ||
5313 context->binder_context_mgr_node->proc != proc) {
5314 mutex_unlock(&context->context_mgr_node_lock);
5315 return -EPERM;
5316 }
5317 mutex_unlock(&context->context_mgr_node_lock);
5319 node = binder_get_node_from_ref(proc, handle, true, NULL);
5320 if (!node)
5321 return -EINVAL;
5323 info->strong_count = node->local_strong_refs +
5324 node->internal_strong_refs;
5325 info->weak_count = node->local_weak_refs;
5327 binder_put_node(node);
5329 return 0;
5330 }
5332 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5333 struct binder_node_debug_info *info)
5336 binder_uintptr_t ptr = info->ptr;
5338 memset(info, 0, sizeof(*info));
5340 binder_inner_proc_lock(proc);
5341 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5342 struct binder_node *node = rb_entry(n, struct binder_node,
5344 if (node->ptr > ptr) {
5345 info->ptr = node->ptr;
5346 info->cookie = node->cookie;
5347 info->has_strong_ref = node->has_strong_ref;
5348 info->has_weak_ref = node->has_weak_ref;
5349 break;
5350 }
5351 }
5352 binder_inner_proc_unlock(proc);
5354 return 0;
5355 }
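/*
 * Illustrative sketch (not part of the driver): because the loop above
 * returns the first node whose ptr is strictly greater than the one
 * passed in, user space can enumerate all nodes of a process by
 * feeding each returned ptr back in, starting from 0, until the ioctl
 * returns a zeroed entry. "binder_fd" is a hypothetical open fd.
 */
#if 0	/* user-space example */
	struct binder_node_debug_info info = {0};

	do {
		if (ioctl(binder_fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
			break;
		/* info.ptr/info.cookie describe the next node, 0 when done */
	} while (info.ptr != 0);
#endif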
5357 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5360 struct binder_proc *proc = filp->private_data;
5361 struct binder_thread *thread;
5362 unsigned int size = _IOC_SIZE(cmd);
5363 void __user *ubuf = (void __user *)arg;
5365 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
5366 proc->pid, current->pid, cmd, arg);*/
5368 binder_selftest_alloc(&proc->alloc);
5370 trace_binder_ioctl(cmd, arg);
5372 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5376 thread = binder_get_thread(proc);
5377 if (thread == NULL) {
5383 case BINDER_WRITE_READ:
5384 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
5388 case BINDER_SET_MAX_THREADS: {
5391 if (copy_from_user(&max_threads, ubuf,
5392 sizeof(max_threads))) {
5396 binder_inner_proc_lock(proc);
5397 proc->max_threads = max_threads;
5398 binder_inner_proc_unlock(proc);
5401 case BINDER_SET_CONTEXT_MGR_EXT: {
5402 struct flat_binder_object fbo;
5404 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5408 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5413 case BINDER_SET_CONTEXT_MGR:
5414 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5418 case BINDER_THREAD_EXIT:
5419 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5420 proc->pid, thread->pid);
5421 binder_thread_release(proc, thread);
5422 thread = NULL;
5423 break;
5424 case BINDER_VERSION: {
5425 struct binder_version __user *ver = ubuf;
5427 if (size != sizeof(struct binder_version)) {
5431 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5432 &ver->protocol_version)) {
5438 case BINDER_GET_NODE_INFO_FOR_REF: {
5439 struct binder_node_info_for_ref info;
5441 if (copy_from_user(&info, ubuf, sizeof(info))) {
5446 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5450 if (copy_to_user(ubuf, &info, sizeof(info))) {
5457 case BINDER_GET_NODE_DEBUG_INFO: {
5458 struct binder_node_debug_info info;
5460 if (copy_from_user(&info, ubuf, sizeof(info))) {
5465 ret = binder_ioctl_get_node_debug_info(proc, &info);
5469 if (copy_to_user(ubuf, &info, sizeof(info))) {
5482 thread->looper_need_return = false;
5483 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5484 if (ret && ret != -ERESTARTSYS)
5485 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5486 err_unlocked:
5487 trace_binder_ioctl_done(ret);
5488 return ret;
5489 }
5491 static void binder_vma_open(struct vm_area_struct *vma)
5493 struct binder_proc *proc = vma->vm_private_data;
5495 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5496 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5497 proc->pid, vma->vm_start, vma->vm_end,
5498 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5499 (unsigned long)pgprot_val(vma->vm_page_prot));
5502 static void binder_vma_close(struct vm_area_struct *vma)
5504 struct binder_proc *proc = vma->vm_private_data;
5506 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5507 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5508 proc->pid, vma->vm_start, vma->vm_end,
5509 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5510 (unsigned long)pgprot_val(vma->vm_page_prot));
5511 binder_alloc_vma_close(&proc->alloc);
5514 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5516 return VM_FAULT_SIGBUS;
5519 static const struct vm_operations_struct binder_vm_ops = {
5520 .open = binder_vma_open,
5521 .close = binder_vma_close,
5522 .fault = binder_vm_fault,
5525 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5528 struct binder_proc *proc = filp->private_data;
5529 const char *failure_string;
5531 if (proc->tsk != current->group_leader)
5532 return -EINVAL;
5534 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5535 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5536 __func__, proc->pid, vma->vm_start, vma->vm_end,
5537 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5538 (unsigned long)pgprot_val(vma->vm_page_prot));
5540 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5541 ret = -EPERM;
5542 failure_string = "bad vm_flags";
5543 goto err_bad_arg;
5544 }
5545 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5546 vma->vm_flags &= ~VM_MAYWRITE;
5548 vma->vm_ops = &binder_vm_ops;
5549 vma->vm_private_data = proc;
5551 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
5557 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5558 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
5559 return ret;
5560 }
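/*
 * Illustrative sketch (not part of the driver): the checks above mean
 * the binder map must never be writable (FORBIDDEN_MMAP_FLAGS rejects
 * VM_WRITE and VM_MAYWRITE is cleared), so user space maps it
 * read-only and the driver fills it with transaction data. A typical
 * open/map sequence; the 1 MB size is an arbitrary example value.
 */
#if 0	/* user-space example, assumes <fcntl.h> and <sys/mman.h> */
	int binder_fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
	void *map = mmap(NULL, 1024 * 1024, PROT_READ,
			 MAP_PRIVATE, binder_fd, 0);
	/* received BR_TRANSACTION buffers point into this mapping */
#endif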
5562 static int binder_open(struct inode *nodp, struct file *filp)
5564 struct binder_proc *proc, *itr;
5565 struct binder_device *binder_dev;
5566 struct binderfs_info *info;
5567 struct dentry *binder_binderfs_dir_entry_proc = NULL;
5568 bool existing_pid = false;
5570 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5571 current->group_leader->pid, current->pid);
5573 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5574 if (proc == NULL)
5575 return -ENOMEM;
5576 spin_lock_init(&proc->inner_lock);
5577 spin_lock_init(&proc->outer_lock);
5578 get_task_struct(current->group_leader);
5579 proc->tsk = current->group_leader;
5580 proc->cred = get_cred(filp->f_cred);
5581 INIT_LIST_HEAD(&proc->todo);
5582 proc->default_priority = task_nice(current);
5583 /* binderfs stashes devices in i_private */
5584 if (is_binderfs_device(nodp)) {
5585 binder_dev = nodp->i_private;
5586 info = nodp->i_sb->s_fs_info;
5587 binder_binderfs_dir_entry_proc = info->proc_log_dir;
5589 binder_dev = container_of(filp->private_data,
5590 struct binder_device, miscdev);
5592 refcount_inc(&binder_dev->ref);
5593 proc->context = &binder_dev->context;
5594 binder_alloc_init(&proc->alloc);
5596 binder_stats_created(BINDER_STAT_PROC);
5597 proc->pid = current->group_leader->pid;
5598 INIT_LIST_HEAD(&proc->delivered_death);
5599 INIT_LIST_HEAD(&proc->waiting_threads);
5600 filp->private_data = proc;
5602 mutex_lock(&binder_procs_lock);
5603 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5604 if (itr->pid == proc->pid) {
5605 existing_pid = true;
5609 hlist_add_head(&proc->proc_node, &binder_procs);
5610 mutex_unlock(&binder_procs_lock);
5612 if (binder_debugfs_dir_entry_proc && !existing_pid) {
5615 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5616 /*
5617 * proc debug entries are shared between contexts.
5618 * Only create for the first PID to avoid debugfs log spamming.
5619 * The printing code will anyway print all contexts for a given
5620 * PID so this is not a problem.
5621 */
5622 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5623 binder_debugfs_dir_entry_proc,
5624 (void *)(unsigned long)proc->pid,
5628 if (binder_binderfs_dir_entry_proc && !existing_pid) {
5630 struct dentry *binderfs_entry;
5632 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5634 * Similar to debugfs, the process specific log file is shared
5635 * between contexts. Only create for the first PID.
5636 * This is ok since same as debugfs, the log file will contain
5637 * information on all contexts of a given PID.
5639 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5640 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5641 if (!IS_ERR(binderfs_entry)) {
5642 proc->binderfs_entry = binderfs_entry;
5646 error = PTR_ERR(binderfs_entry);
5647 pr_warn("Unable to create file %s in binderfs (error %d)\n",
5655 static int binder_flush(struct file *filp, fl_owner_t id)
5657 struct binder_proc *proc = filp->private_data;
5659 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5664 static void binder_deferred_flush(struct binder_proc *proc)
5669 binder_inner_proc_lock(proc);
5670 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5671 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5673 thread->looper_need_return = true;
5674 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5675 wake_up_interruptible(&thread->wait);
5679 binder_inner_proc_unlock(proc);
5681 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5682 "binder_flush: %d woke %d threads\n", proc->pid,
5686 static int binder_release(struct inode *nodp, struct file *filp)
5688 struct binder_proc *proc = filp->private_data;
5690 debugfs_remove(proc->debugfs_entry);
5692 if (proc->binderfs_entry) {
5693 binderfs_remove_file(proc->binderfs_entry);
5694 proc->binderfs_entry = NULL;
5697 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5702 static int binder_node_release(struct binder_node *node, int refs)
5704 struct binder_ref *ref;
5706 struct binder_proc *proc = node->proc;
5708 binder_release_work(proc, &node->async_todo);
5710 binder_node_lock(node);
5711 binder_inner_proc_lock(proc);
5712 binder_dequeue_work_ilocked(&node->work);
5713 /*
5714 * The caller must have taken a temporary ref on the node.
5715 */
5716 BUG_ON(!node->tmp_refs);
5717 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5718 binder_inner_proc_unlock(proc);
5719 binder_node_unlock(node);
5720 binder_free_node(node);
5722 return refs;
5723 }
5725 node->proc = NULL;
5726 node->local_strong_refs = 0;
5727 node->local_weak_refs = 0;
5728 binder_inner_proc_unlock(proc);
5730 spin_lock(&binder_dead_nodes_lock);
5731 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5732 spin_unlock(&binder_dead_nodes_lock);
5734 hlist_for_each_entry(ref, &node->refs, node_entry) {
5735 refs++;
5736 /*
5737 * Need the node lock to synchronize
5738 * with new notification requests and the
5739 * inner lock to synchronize with queued
5740 * death notifications.
5741 */
5742 binder_inner_proc_lock(ref->proc);
5743 if (!ref->death) {
5744 binder_inner_proc_unlock(ref->proc);
5745 continue;
5746 }
5748 death++;
5750 BUG_ON(!list_empty(&ref->death->work.entry));
5751 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5752 binder_enqueue_work_ilocked(&ref->death->work,
5754 binder_wakeup_proc_ilocked(ref->proc);
5755 binder_inner_proc_unlock(ref->proc);
5758 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5759 "node %d now dead, refs %d, death %d\n",
5760 node->debug_id, refs, death);
5761 binder_node_unlock(node);
5762 binder_put_node(node);
5767 static void binder_deferred_release(struct binder_proc *proc)
5769 struct binder_context *context = proc->context;
5771 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5773 mutex_lock(&binder_procs_lock);
5774 hlist_del(&proc->proc_node);
5775 mutex_unlock(&binder_procs_lock);
5777 mutex_lock(&context->context_mgr_node_lock);
5778 if (context->binder_context_mgr_node &&
5779 context->binder_context_mgr_node->proc == proc) {
5780 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5781 "%s: %d context_mgr_node gone\n",
5782 __func__, proc->pid);
5783 context->binder_context_mgr_node = NULL;
5785 mutex_unlock(&context->context_mgr_node_lock);
5786 binder_inner_proc_lock(proc);
5787 /*
5788 * Make sure proc stays alive after we
5789 * remove all the threads
5790 */
5791 proc->tmp_ref++;
5793 proc->is_dead = true;
5794 threads = 0;
5795 active_transactions = 0;
5796 while ((n = rb_first(&proc->threads))) {
5797 struct binder_thread *thread;
5799 thread = rb_entry(n, struct binder_thread, rb_node);
5800 binder_inner_proc_unlock(proc);
5802 active_transactions += binder_thread_release(proc, thread);
5803 binder_inner_proc_lock(proc);
5808 while ((n = rb_first(&proc->nodes))) {
5809 struct binder_node *node;
5811 node = rb_entry(n, struct binder_node, rb_node);
5814 * take a temporary ref on the node before
5815 * calling binder_node_release() which will either
5816 * kfree() the node or call binder_put_node()
5818 binder_inc_node_tmpref_ilocked(node);
5819 rb_erase(&node->rb_node, &proc->nodes);
5820 binder_inner_proc_unlock(proc);
5821 incoming_refs = binder_node_release(node, incoming_refs);
5822 binder_inner_proc_lock(proc);
5824 binder_inner_proc_unlock(proc);
5827 binder_proc_lock(proc);
5828 while ((n = rb_first(&proc->refs_by_desc))) {
5829 struct binder_ref *ref;
5831 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5833 binder_cleanup_ref_olocked(ref);
5834 binder_proc_unlock(proc);
5835 binder_free_ref(ref);
5836 binder_proc_lock(proc);
5838 binder_proc_unlock(proc);
5840 binder_release_work(proc, &proc->todo);
5841 binder_release_work(proc, &proc->delivered_death);
5843 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5844 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5845 __func__, proc->pid, threads, nodes, incoming_refs,
5846 outgoing_refs, active_transactions);
5848 binder_proc_dec_tmpref(proc);
5851 static void binder_deferred_func(struct work_struct *work)
5853 struct binder_proc *proc;
5858 mutex_lock(&binder_deferred_lock);
5859 if (!hlist_empty(&binder_deferred_list)) {
5860 proc = hlist_entry(binder_deferred_list.first,
5861 struct binder_proc, deferred_work_node);
5862 hlist_del_init(&proc->deferred_work_node);
5863 defer = proc->deferred_work;
5864 proc->deferred_work = 0;
5869 mutex_unlock(&binder_deferred_lock);
5871 if (defer & BINDER_DEFERRED_FLUSH)
5872 binder_deferred_flush(proc);
5874 if (defer & BINDER_DEFERRED_RELEASE)
5875 binder_deferred_release(proc); /* frees proc */
5878 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5881 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5883 mutex_lock(&binder_deferred_lock);
5884 proc->deferred_work |= defer;
5885 if (hlist_unhashed(&proc->deferred_work_node)) {
5886 hlist_add_head(&proc->deferred_work_node,
5887 &binder_deferred_list);
5888 schedule_work(&binder_deferred_work);
5890 mutex_unlock(&binder_deferred_lock);
5893 static void print_binder_transaction_ilocked(struct seq_file *m,
5894 struct binder_proc *proc,
5896 struct binder_transaction *t)
5898 struct binder_proc *to_proc;
5899 struct binder_buffer *buffer = t->buffer;
5901 spin_lock(&t->lock);
5902 to_proc = t->to_proc;
5904 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5905 prefix, t->debug_id, t,
5906 t->from ? t->from->proc->pid : 0,
5907 t->from ? t->from->pid : 0,
5908 to_proc ? to_proc->pid : 0,
5909 t->to_thread ? t->to_thread->pid : 0,
5910 t->code, t->flags, t->priority, t->need_reply);
5911 spin_unlock(&t->lock);
5913 if (proc != to_proc) {
5915 * Can only safely deref buffer if we are holding the
5916 * correct proc inner lock for this node
5922 if (buffer == NULL) {
5923 seq_puts(m, " buffer free\n");
5926 if (buffer->target_node)
5927 seq_printf(m, " node %d", buffer->target_node->debug_id);
5928 seq_printf(m, " size %zd:%zd data %pK\n",
5929 buffer->data_size, buffer->offsets_size,
5933 static void print_binder_work_ilocked(struct seq_file *m,
5934 struct binder_proc *proc,
5936 const char *transaction_prefix,
5937 struct binder_work *w)
5939 struct binder_node *node;
5940 struct binder_transaction *t;
5943 case BINDER_WORK_TRANSACTION:
5944 t = container_of(w, struct binder_transaction, work);
5945 print_binder_transaction_ilocked(
5946 m, proc, transaction_prefix, t);
5948 case BINDER_WORK_RETURN_ERROR: {
5949 struct binder_error *e = container_of(
5950 w, struct binder_error, work);
5952 seq_printf(m, "%stransaction error: %u\n",
5955 case BINDER_WORK_TRANSACTION_COMPLETE:
5956 seq_printf(m, "%stransaction complete\n", prefix);
5958 case BINDER_WORK_NODE:
5959 node = container_of(w, struct binder_node, work);
5960 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5961 prefix, node->debug_id,
5962 (u64)node->ptr, (u64)node->cookie);
5964 case BINDER_WORK_DEAD_BINDER:
5965 seq_printf(m, "%shas dead binder\n", prefix);
5967 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5968 seq_printf(m, "%shas cleared dead binder\n", prefix);
5970 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5971 seq_printf(m, "%shas cleared death notification\n", prefix);
5974 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5979 static void print_binder_thread_ilocked(struct seq_file *m,
5980 struct binder_thread *thread,
5983 struct binder_transaction *t;
5984 struct binder_work *w;
5985 size_t start_pos = m->count;
5988 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
5989 thread->pid, thread->looper,
5990 thread->looper_need_return,
5991 atomic_read(&thread->tmp_ref));
5992 header_pos = m->count;
5993 t = thread->transaction_stack;
5995 if (t->from == thread) {
5996 print_binder_transaction_ilocked(m, thread->proc,
5997 " outgoing transaction", t);
5999 } else if (t->to_thread == thread) {
6000 print_binder_transaction_ilocked(m, thread->proc,
6001 " incoming transaction", t);
6004 print_binder_transaction_ilocked(m, thread->proc,
6005 " bad transaction", t);
6009 list_for_each_entry(w, &thread->todo, entry) {
6010 print_binder_work_ilocked(m, thread->proc, " ",
6011 " pending transaction", w);
6013 if (!print_always && m->count == header_pos)
6014 m->count = start_pos;
6017 static void print_binder_node_nilocked(struct seq_file *m,
6018 struct binder_node *node)
6020 struct binder_ref *ref;
6021 struct binder_work *w;
6025 hlist_for_each_entry(ref, &node->refs, node_entry)
6028 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6029 node->debug_id, (u64)node->ptr, (u64)node->cookie,
6030 node->has_strong_ref, node->has_weak_ref,
6031 node->local_strong_refs, node->local_weak_refs,
6032 node->internal_strong_refs, count, node->tmp_refs);
6034 seq_puts(m, " proc");
6035 hlist_for_each_entry(ref, &node->refs, node_entry)
6036 seq_printf(m, " %d", ref->proc->pid);
6040 list_for_each_entry(w, &node->async_todo, entry)
6041 print_binder_work_ilocked(m, node->proc, " ",
6042 " pending async transaction", w);
6046 static void print_binder_ref_olocked(struct seq_file *m,
6047 struct binder_ref *ref)
6049 binder_node_lock(ref->node);
6050 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
6051 ref->data.debug_id, ref->data.desc,
6052 ref->node->proc ? "" : "dead ",
6053 ref->node->debug_id, ref->data.strong,
6054 ref->data.weak, ref->death);
6055 binder_node_unlock(ref->node);
6058 static void print_binder_proc(struct seq_file *m,
6059 struct binder_proc *proc, int print_all)
6061 struct binder_work *w;
6063 size_t start_pos = m->count;
6065 struct binder_node *last_node = NULL;
6067 seq_printf(m, "proc %d\n", proc->pid);
6068 seq_printf(m, "context %s\n", proc->context->name);
6069 header_pos = m->count;
6071 binder_inner_proc_lock(proc);
6072 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6073 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6074 rb_node), print_all);
6076 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
6077 struct binder_node *node = rb_entry(n, struct binder_node,
6079 if (!print_all && !node->has_async_transaction)
6083 * take a temporary reference on the node so it
6084 * survives and isn't removed from the tree
6085 * while we print it.
6087 binder_inc_node_tmpref_ilocked(node);
6088 /* Need to drop inner lock to take node lock */
6089 binder_inner_proc_unlock(proc);
6091 binder_put_node(last_node);
6092 binder_node_inner_lock(node);
6093 print_binder_node_nilocked(m, node);
6094 binder_node_inner_unlock(node);
6096 binder_inner_proc_lock(proc);
6098 binder_inner_proc_unlock(proc);
6100 binder_put_node(last_node);
6103 binder_proc_lock(proc);
6104 for (n = rb_first(&proc->refs_by_desc);
6107 print_binder_ref_olocked(m, rb_entry(n,
6110 binder_proc_unlock(proc);
6112 binder_alloc_print_allocated(m, &proc->alloc);
6113 binder_inner_proc_lock(proc);
6114 list_for_each_entry(w, &proc->todo, entry)
6115 print_binder_work_ilocked(m, proc, " ",
6116 " pending transaction", w);
6117 list_for_each_entry(w, &proc->delivered_death, entry) {
6118 seq_puts(m, " has delivered dead binder\n");
6121 binder_inner_proc_unlock(proc);
6122 if (!print_all && m->count == header_pos)
6123 m->count = start_pos;
6126 static const char * const binder_return_strings[] = {
6127 "BR_ERROR",
6128 "BR_OK",
6129 "BR_TRANSACTION",
6130 "BR_REPLY",
6131 "BR_ACQUIRE_RESULT",
6132 "BR_DEAD_REPLY",
6133 "BR_TRANSACTION_COMPLETE",
6134 "BR_INCREFS",
6135 "BR_ACQUIRE",
6136 "BR_RELEASE",
6137 "BR_DECREFS",
6138 "BR_ATTEMPT_ACQUIRE",
6139 "BR_NOOP",
6140 "BR_SPAWN_LOOPER",
6141 "BR_FINISHED",
6142 "BR_DEAD_BINDER",
6143 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
6144 "BR_FAILED_REPLY"
6145 };
6147 static const char * const binder_command_strings[] = {
6148 "BC_TRANSACTION",
6149 "BC_REPLY",
6150 "BC_ACQUIRE_RESULT",
6151 "BC_FREE_BUFFER",
6152 "BC_INCREFS",
6153 "BC_ACQUIRE",
6154 "BC_RELEASE",
6155 "BC_DECREFS",
6156 "BC_INCREFS_DONE",
6157 "BC_ACQUIRE_DONE",
6158 "BC_ATTEMPT_ACQUIRE",
6159 "BC_REGISTER_LOOPER",
6160 "BC_ENTER_LOOPER",
6161 "BC_EXIT_LOOPER",
6162 "BC_REQUEST_DEATH_NOTIFICATION",
6163 "BC_CLEAR_DEATH_NOTIFICATION",
6164 "BC_DEAD_BINDER_DONE",
6165 "BC_TRANSACTION_SG",
6166 "BC_REPLY_SG",
6167 };
6169 static const char * const binder_objstat_strings[] = {
6170 "proc",
6171 "thread",
6172 "node",
6173 "ref",
6174 "death",
6175 "transaction",
6176 "transaction_complete"
6177 };
6179 static void print_binder_stats(struct seq_file *m, const char *prefix,
6180 struct binder_stats *stats)
6184 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6185 ARRAY_SIZE(binder_command_strings));
6186 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6187 int temp = atomic_read(&stats->bc[i]);
6190 seq_printf(m, "%s%s: %d\n", prefix,
6191 binder_command_strings[i], temp);
6194 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6195 ARRAY_SIZE(binder_return_strings));
6196 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6197 int temp = atomic_read(&stats->br[i]);
6200 seq_printf(m, "%s%s: %d\n", prefix,
6201 binder_return_strings[i], temp);
6204 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6205 ARRAY_SIZE(binder_objstat_strings));
6206 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6207 ARRAY_SIZE(stats->obj_deleted));
6208 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6209 int created = atomic_read(&stats->obj_created[i]);
6210 int deleted = atomic_read(&stats->obj_deleted[i]);
6212 if (created || deleted)
6213 seq_printf(m, "%s%s: active %d total %d\n",
6215 binder_objstat_strings[i],
6221 static void print_binder_proc_stats(struct seq_file *m,
6222 struct binder_proc *proc)
6224 struct binder_work *w;
6225 struct binder_thread *thread;
6227 int count, strong, weak, ready_threads;
6228 size_t free_async_space =
6229 binder_alloc_get_free_async_space(&proc->alloc);
6231 seq_printf(m, "proc %d\n", proc->pid);
6232 seq_printf(m, "context %s\n", proc->context->name);
6235 binder_inner_proc_lock(proc);
6236 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6239 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6242 seq_printf(m, " threads: %d\n", count);
6243 seq_printf(m, " requested threads: %d+%d/%d\n"
6244 " ready threads %d\n"
6245 " free async space %zd\n", proc->requested_threads,
6246 proc->requested_threads_started, proc->max_threads,
6250 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
6252 binder_inner_proc_unlock(proc);
6253 seq_printf(m, " nodes: %d\n", count);
6257 binder_proc_lock(proc);
6258 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
6259 struct binder_ref *ref = rb_entry(n, struct binder_ref,
6262 strong += ref->data.strong;
6263 weak += ref->data.weak;
6265 binder_proc_unlock(proc);
6266 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
6268 count = binder_alloc_get_allocated_count(&proc->alloc);
6269 seq_printf(m, " buffers: %d\n", count);
6271 binder_alloc_print_pages(m, &proc->alloc);
6274 binder_inner_proc_lock(proc);
6275 list_for_each_entry(w, &proc->todo, entry) {
6276 if (w->type == BINDER_WORK_TRANSACTION)
6279 binder_inner_proc_unlock(proc);
6280 seq_printf(m, " pending transactions: %d\n", count);
6282 print_binder_stats(m, " ", &proc->stats);
6286 int binder_state_show(struct seq_file *m, void *unused)
6288 struct binder_proc *proc;
6289 struct binder_node *node;
6290 struct binder_node *last_node = NULL;
6292 seq_puts(m, "binder state:\n");
6294 spin_lock(&binder_dead_nodes_lock);
6295 if (!hlist_empty(&binder_dead_nodes))
6296 seq_puts(m, "dead nodes:\n");
6297 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
6298 /*
6299 * take a temporary reference on the node so it
6300 * survives and isn't removed from the list
6301 * while we print it.
6304 spin_unlock(&binder_dead_nodes_lock);
6306 binder_put_node(last_node);
6307 binder_node_lock(node);
6308 print_binder_node_nilocked(m, node);
6309 binder_node_unlock(node);
6311 spin_lock(&binder_dead_nodes_lock);
6313 spin_unlock(&binder_dead_nodes_lock);
6315 binder_put_node(last_node);
6317 mutex_lock(&binder_procs_lock);
6318 hlist_for_each_entry(proc, &binder_procs, proc_node)
6319 print_binder_proc(m, proc, 1);
6320 mutex_unlock(&binder_procs_lock);
6325 int binder_stats_show(struct seq_file *m, void *unused)
6327 struct binder_proc *proc;
6329 seq_puts(m, "binder stats:\n");
6331 print_binder_stats(m, "", &binder_stats);
6333 mutex_lock(&binder_procs_lock);
6334 hlist_for_each_entry(proc, &binder_procs, proc_node)
6335 print_binder_proc_stats(m, proc);
6336 mutex_unlock(&binder_procs_lock);
6341 int binder_transactions_show(struct seq_file *m, void *unused)
6343 struct binder_proc *proc;
6345 seq_puts(m, "binder transactions:\n");
6346 mutex_lock(&binder_procs_lock);
6347 hlist_for_each_entry(proc, &binder_procs, proc_node)
6348 print_binder_proc(m, proc, 0);
6349 mutex_unlock(&binder_procs_lock);
6354 static int proc_show(struct seq_file *m, void *unused)
6356 struct binder_proc *itr;
6357 int pid = (unsigned long)m->private;
6359 mutex_lock(&binder_procs_lock);
6360 hlist_for_each_entry(itr, &binder_procs, proc_node) {
6361 if (itr->pid == pid) {
6362 seq_puts(m, "binder proc state:\n");
6363 print_binder_proc(m, itr, 1);
6366 mutex_unlock(&binder_procs_lock);
6371 static void print_binder_transaction_log_entry(struct seq_file *m,
6372 struct binder_transaction_log_entry *e)
6374 int debug_id = READ_ONCE(e->debug_id_done);
6375 /*
6376 * read barrier to guarantee debug_id_done read before
6377 * we print the log values
6378 */
6379 smp_rmb();
6380 seq_printf(m,
6381 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6382 e->debug_id, (e->call_type == 2) ? "reply" :
6383 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6384 e->from_thread, e->to_proc, e->to_thread, e->context_name,
6385 e->to_node, e->target_handle, e->data_size, e->offsets_size,
6386 e->return_error, e->return_error_param,
6387 e->return_error_line);
6389 * read-barrier to guarantee read of debug_id_done after
6390 * done printing the fields of the entry
6393 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6394 "\n" : " (incomplete)\n");
int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}
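
/*
 * may_pollfree warns poll implementations that cache waitqueue
 * pointers (e.g. aio) that binder may free a thread's poll waitqueue
 * while waiters are still queued (BINDER_THREAD_EXIT), so they must
 * not assume the queue outlives the file.
 */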
const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
	.may_pollfree = true,
};
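
/*
 * Register a single binder misc device (e.g. /dev/binder) with its
 * own context and no context manager yet. The initial reference is
 * owned by the global binder_devices list.
 */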
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	refcount_set(&binder_device->ref, 1);
	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}
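
/*
 * Module init: set up the allocator shrinker, the debugfs tree, any
 * devices named in the "devices" module parameter (only when binderfs
 * isn't providing the nodes), and finally binderfs itself.
 */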
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;
	char *device_names = NULL;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;
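
	/*
	 * Start both ring cursors at ~0U so the writer's first
	 * atomic_inc_return() wraps to 0 and fills slot 0 first.
	 */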
	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}
	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
	    strcmp(binder_devices_param, "") != 0) {
		/*
		 * Copy the module_parameter string, because we don't want to
		 * tokenize it in-place.
		 */
		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
		if (!device_names) {
			ret = -ENOMEM;
			goto err_alloc_device_names_failed;
		}

		device_tmp = device_names;
		while ((device_name = strsep(&device_tmp, ","))) {
			ret = init_binder_device(device_name);
			if (ret)
				goto err_init_binder_device_failed;
		}
	}

	ret = init_binderfs();
	if (ret)
		goto err_init_binder_device_failed;

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");