3 * Android IPC Subsystem
5 * Copyright (C) 2007-2008 Google, Inc.
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <asm/cacheflush.h>
21 #include <linux/fdtable.h>
22 #include <linux/file.h>
23 #include <linux/freezer.h>
25 #include <linux/list.h>
26 #include <linux/miscdevice.h>
28 #include <linux/module.h>
29 #include <linux/mutex.h>
30 #include <linux/nsproxy.h>
31 #include <linux/poll.h>
32 #include <linux/debugfs.h>
33 #include <linux/rbtree.h>
34 #include <linux/sched.h>
35 #include <linux/seq_file.h>
36 #include <linux/uaccess.h>
37 #include <linux/vmalloc.h>
38 #include <linux/slab.h>
39 #include <linux/pid_namespace.h>
40 #include <linux/security.h>
42 #ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
43 #define BINDER_IPC_32BIT 1
46 #include <uapi/linux/android/binder.h>
47 #include "binder_trace.h"
49 static DEFINE_MUTEX(binder_main_lock);
50 static DEFINE_MUTEX(binder_deferred_lock);
51 static DEFINE_MUTEX(binder_mmap_lock);
53 static HLIST_HEAD(binder_procs);
54 static HLIST_HEAD(binder_deferred_list);
55 static HLIST_HEAD(binder_dead_nodes);
57 static struct dentry *binder_debugfs_dir_entry_root;
58 static struct dentry *binder_debugfs_dir_entry_proc;
59 static struct binder_node *binder_context_mgr_node;
60 static kuid_t binder_context_mgr_uid = INVALID_UID;
61 static int binder_last_id;
62 static struct workqueue_struct *binder_deferred_workqueue;
64 #define BINDER_DEBUG_ENTRY(name) \
65 static int binder_##name##_open(struct inode *inode, struct file *file) \
67 return single_open(file, binder_##name##_show, inode->i_private); \
70 static const struct file_operations binder_##name##_fops = { \
71 .owner = THIS_MODULE, \
72 .open = binder_##name##_open, \
74 .llseek = seq_lseek, \
75 .release = single_release, \
78 static int binder_proc_show(struct seq_file *m, void *unused);
79 BINDER_DEBUG_ENTRY(proc);
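/*
 * For illustration, BINDER_DEBUG_ENTRY(proc) above expands to roughly the
 * following (a sketch only; the real text comes from the macro definition
 * at the top of this block):
 *
 *	static int binder_proc_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, binder_proc_show, inode->i_private);
 *	}
 *
 *	static const struct file_operations binder_proc_fops = {
 *		.owner = THIS_MODULE,
 *		.open = binder_proc_open,
 *		.read = seq_read,
 *		.llseek = seq_lseek,
 *		.release = single_release,
 *	};
 *
 * The resulting fops are registered under debugfs, typically as
 * /sys/kernel/debug/binder/proc/<pid>, to dump per-process state.
 */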
81 /* This is only defined in include/asm-arm/sizes.h */
87 #define SZ_4M 0x400000
90 #define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
92 #define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
95 BINDER_DEBUG_USER_ERROR = 1U << 0,
96 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
97 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
98 BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
99 BINDER_DEBUG_DEAD_BINDER = 1U << 4,
100 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
101 BINDER_DEBUG_READ_WRITE = 1U << 6,
102 BINDER_DEBUG_USER_REFS = 1U << 7,
103 BINDER_DEBUG_THREADS = 1U << 8,
104 BINDER_DEBUG_TRANSACTION = 1U << 9,
105 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
106 BINDER_DEBUG_FREE_BUFFER = 1U << 11,
107 BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
108 BINDER_DEBUG_BUFFER_ALLOC = 1U << 13,
109 BINDER_DEBUG_PRIORITY_CAP = 1U << 14,
110 BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15,
112 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
113 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
114 module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
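/*
 * Example (assuming the usual module name "binder" and a mounted sysfs):
 * the mask can be changed at runtime, e.g. to additionally log every
 * read/write and transaction:
 *
 *	echo 0x247 > /sys/module/binder/parameters/debug_mask
 *
 * where 0x247 = USER_ERROR | FAILED_TRANSACTION | DEAD_TRANSACTION |
 * READ_WRITE | TRANSACTION, using the bit values defined above.
 */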
116 static bool binder_debug_no_lock;
117 module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
119 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
120 static int binder_stop_on_user_error;
122 static int binder_set_stop_on_user_error(const char *val,
123 struct kernel_param *kp)
127 ret = param_set_int(val, kp);
128 if (binder_stop_on_user_error < 2)
129 wake_up(&binder_user_error_wait);
132 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
133 param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
135 #define binder_debug(mask, x...) \
137 if (binder_debug_mask & mask) \
141 #define binder_user_error(x...) \
143 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
145 if (binder_stop_on_user_error) \
146 binder_stop_on_user_error = 2; \
149 enum binder_stat_types {
155 BINDER_STAT_TRANSACTION,
156 BINDER_STAT_TRANSACTION_COMPLETE,
160 struct binder_stats {
161 int br[_IOC_NR(BR_FAILED_REPLY) + 1];
162 int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
163 int obj_created[BINDER_STAT_COUNT];
164 int obj_deleted[BINDER_STAT_COUNT];
167 static struct binder_stats binder_stats;
169 static inline void binder_stats_deleted(enum binder_stat_types type)
171 binder_stats.obj_deleted[type]++;
174 static inline void binder_stats_created(enum binder_stat_types type)
176 binder_stats.obj_created[type]++;
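/*
 * obj_created[] and obj_deleted[] are monotonically increasing counters
 * indexed by BINDER_STAT_*; the number of currently live objects of a given
 * type can be recovered as created - deleted, which is how the debugfs
 * stats output presents them.
 */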
179 struct binder_transaction_log_entry {
191 struct binder_transaction_log {
194 struct binder_transaction_log_entry entry[32];
196 static struct binder_transaction_log binder_transaction_log;
197 static struct binder_transaction_log binder_transaction_log_failed;
199 static struct binder_transaction_log_entry *binder_transaction_log_add(
200 struct binder_transaction_log *log)
202 struct binder_transaction_log_entry *e;
204 e = &log->entry[log->next];
205 memset(e, 0, sizeof(*e));
207 if (log->next == ARRAY_SIZE(log->entry)) {
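/*
 * binder_transaction_log is a small ring buffer: entry[] holds the last 32
 * transactions, next is the slot to fill, and the full flag is set once
 * next wraps.  binder_transaction_log_failed records only transactions that
 * failed; both logs are exported through debugfs.
 */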
215 struct list_head entry;
217 BINDER_WORK_TRANSACTION = 1,
218 BINDER_WORK_TRANSACTION_COMPLETE,
220 BINDER_WORK_DEAD_BINDER,
221 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
222 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
228 struct binder_work work;
230 struct rb_node rb_node;
231 struct hlist_node dead_node;
233 struct binder_proc *proc;
234 struct hlist_head refs;
235 int internal_strong_refs;
237 int local_strong_refs;
238 binder_uintptr_t ptr;
239 binder_uintptr_t cookie;
240 unsigned has_strong_ref:1;
241 unsigned pending_strong_ref:1;
242 unsigned has_weak_ref:1;
243 unsigned pending_weak_ref:1;
244 unsigned has_async_transaction:1;
245 unsigned accept_fds:1;
246 unsigned min_priority:8;
247 struct list_head async_todo;
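/*
 * Node reference counting, summarized from how these counters are used
 * elsewhere in this file: internal_strong_refs counts strong references
 * held on behalf of other processes through binder_ref objects, while
 * local_strong_refs/local_weak_refs count references held by the owning
 * process itself (for example by in-flight transaction buffers).  The
 * has_*_ref and pending_*_ref bits track what the owning userspace process
 * has been told via BR_INCREFS/BR_ACQUIRE and has not yet acknowledged with
 * BC_INCREFS_DONE/BC_ACQUIRE_DONE.
 */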
250 struct binder_ref_death {
251 struct binder_work work;
252 binder_uintptr_t cookie;
256 /* Lookups needed: */
257 /* node + proc => ref (transaction) */
258 /* desc + proc => ref (transaction, inc/dec ref) */
259 /* node => refs + procs (proc exit) */
261 struct rb_node rb_node_desc;
262 struct rb_node rb_node_node;
263 struct hlist_node node_entry;
264 struct binder_proc *proc;
265 struct binder_node *node;
269 struct binder_ref_death *death;
272 struct binder_buffer {
273 struct list_head entry; /* free and allocated entries by address */
274 struct rb_node rb_node; /* free entry by size or allocated entry */
277 unsigned allow_user_free:1;
278 unsigned async_transaction:1;
279 unsigned debug_id:29;
281 struct binder_transaction *transaction;
283 struct binder_node *target_node;
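/*
 * A binder_buffer header is immediately followed by its payload: data_size
 * bytes of transaction data, then (aligned to sizeof(void *)) offsets_size
 * bytes of binder_size_t offsets locating the flat_binder_objects inside
 * that data.  binder_alloc_buf() below sizes allocations accordingly as
 * ALIGN(data_size, sizeof(void *)) + ALIGN(offsets_size, sizeof(void *)).
 */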
289 enum binder_deferred_state {
290 BINDER_DEFERRED_PUT_FILES = 0x01,
291 BINDER_DEFERRED_FLUSH = 0x02,
292 BINDER_DEFERRED_RELEASE = 0x04,
296 struct hlist_node proc_node;
297 struct rb_root threads;
298 struct rb_root nodes;
299 struct rb_root refs_by_desc;
300 struct rb_root refs_by_node;
302 struct vm_area_struct *vma;
303 struct mm_struct *vma_vm_mm;
304 struct task_struct *tsk;
305 struct files_struct *files;
306 const struct cred *cred;
307 struct hlist_node deferred_work_node;
310 ptrdiff_t user_buffer_offset;
312 struct list_head buffers;
313 struct rb_root free_buffers;
314 struct rb_root allocated_buffers;
315 size_t free_async_space;
319 uint32_t buffer_free;
320 struct list_head todo;
321 wait_queue_head_t wait;
322 struct binder_stats stats;
323 struct list_head delivered_death;
325 int requested_threads;
326 int requested_threads_started;
328 long default_priority;
329 struct dentry *debugfs_entry;
333 BINDER_LOOPER_STATE_REGISTERED = 0x01,
334 BINDER_LOOPER_STATE_ENTERED = 0x02,
335 BINDER_LOOPER_STATE_EXITED = 0x04,
336 BINDER_LOOPER_STATE_INVALID = 0x08,
337 BINDER_LOOPER_STATE_WAITING = 0x10,
338 BINDER_LOOPER_STATE_NEED_RETURN = 0x20,
339 BINDER_LOOPER_STATE_POLL = 0x40,
342 struct binder_thread {
343 struct binder_proc *proc;
344 struct rb_node rb_node;
347 struct binder_transaction *transaction_stack;
348 struct list_head todo;
349 uint32_t return_error; /* Write failed, return error code in read buf */
350 uint32_t return_error2; /* Write failed, return error code in read */
351 /* buffer. Used when sending a reply to a dead process that */
352 /* we are also waiting on */
353 wait_queue_head_t wait;
354 struct binder_stats stats;
357 struct binder_transaction {
359 struct binder_work work;
360 struct binder_thread *from;
361 struct binder_transaction *from_parent;
362 struct binder_proc *to_proc;
363 struct binder_thread *to_thread;
364 struct binder_transaction *to_parent;
365 unsigned need_reply:1;
366 /* unsigned is_dead:1; */ /* not used at the moment */
368 struct binder_buffer *buffer;
377 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
379 static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
381 struct files_struct *files = proc->files;
382 unsigned long rlim_cur;
388 if (!lock_task_sighand(proc->tsk, &irqs))
391 rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
392 unlock_task_sighand(proc->tsk, &irqs);
394 return __alloc_fd(files, 0, rlim_cur, flags);
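/*
 * Note: unlike get_unused_fd_flags(), this allocates the descriptor in the
 * *target* process's files_struct (proc->files), bounded by that task's
 * RLIMIT_NOFILE, so that file descriptors carried in a transaction show up
 * directly in the receiving process.
 */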
398 * copied from fd_install
400 static void task_fd_install(
401 struct binder_proc *proc, unsigned int fd, struct file *file)
404 __fd_install(proc->files, fd, file);
408 * copied from sys_close
410 static long task_close_fd(struct binder_proc *proc, unsigned int fd)
414 if (proc->files == NULL)
417 retval = __close_fd(proc->files, fd);
418 /* can't restart close syscall because file table entry was cleared */
419 if (unlikely(retval == -ERESTARTSYS ||
420 retval == -ERESTARTNOINTR ||
421 retval == -ERESTARTNOHAND ||
422 retval == -ERESTART_RESTARTBLOCK))
428 static inline void binder_lock(const char *tag)
430 trace_binder_lock(tag);
431 mutex_lock(&binder_main_lock);
432 trace_binder_locked(tag);
435 static inline void binder_unlock(const char *tag)
437 trace_binder_unlock(tag);
438 mutex_unlock(&binder_main_lock);
441 static void binder_set_nice(long nice)
445 if (can_nice(current, nice)) {
446 set_user_nice(current, nice);
449 min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
450 binder_debug(BINDER_DEBUG_PRIORITY_CAP,
451 "%d: nice value %ld not allowed use %ld instead\n",
452 current->pid, nice, min_nice);
453 set_user_nice(current, min_nice);
454 if (min_nice <= MAX_NICE)
456 binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
459 static size_t binder_buffer_size(struct binder_proc *proc,
460 struct binder_buffer *buffer)
462 if (list_is_last(&buffer->entry, &proc->buffers))
463 return proc->buffer + proc->buffer_size - (void *)buffer->data;
464 return (size_t)list_entry(buffer->entry.next,
465 struct binder_buffer, entry) - (size_t)buffer->data;
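/*
 * binder_buffer_size() returns the payload capacity of a buffer: the
 * distance from buffer->data to the next binder_buffer header in the
 * proc->buffers list (or to the end of the mapped area for the last
 * buffer).  Buffers are laid out back to back in the mmap'd region, so no
 * separate size field is needed.
 */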
468 static void binder_insert_free_buffer(struct binder_proc *proc,
469 struct binder_buffer *new_buffer)
471 struct rb_node **p = &proc->free_buffers.rb_node;
472 struct rb_node *parent = NULL;
473 struct binder_buffer *buffer;
475 size_t new_buffer_size;
477 BUG_ON(!new_buffer->free);
479 new_buffer_size = binder_buffer_size(proc, new_buffer);
481 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
482 "%d: add free buffer, size %zd, at %pK\n",
483 proc->pid, new_buffer_size, new_buffer);
487 buffer = rb_entry(parent, struct binder_buffer, rb_node);
488 BUG_ON(!buffer->free);
490 buffer_size = binder_buffer_size(proc, buffer);
492 if (new_buffer_size < buffer_size)
493 p = &parent->rb_left;
495 p = &parent->rb_right;
497 rb_link_node(&new_buffer->rb_node, parent, p);
498 rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
501 static void binder_insert_allocated_buffer(struct binder_proc *proc,
502 struct binder_buffer *new_buffer)
504 struct rb_node **p = &proc->allocated_buffers.rb_node;
505 struct rb_node *parent = NULL;
506 struct binder_buffer *buffer;
508 BUG_ON(new_buffer->free);
512 buffer = rb_entry(parent, struct binder_buffer, rb_node);
513 BUG_ON(buffer->free);
515 if (new_buffer < buffer)
516 p = &parent->rb_left;
517 else if (new_buffer > buffer)
518 p = &parent->rb_right;
522 rb_link_node(&new_buffer->rb_node, parent, p);
523 rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
526 static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
529 struct rb_node *n = proc->allocated_buffers.rb_node;
530 struct binder_buffer *buffer;
531 struct binder_buffer *kern_ptr;
533 kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
534 - offsetof(struct binder_buffer, data));
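/*
 * user_ptr is the data area's address as seen by userspace; subtracting
 * proc->user_buffer_offset converts it to the kernel mapping of the same
 * memory, and subtracting offsetof(struct binder_buffer, data) steps back
 * from the data area to the binder_buffer header that the
 * allocated_buffers rbtree is keyed on.
 */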
537 buffer = rb_entry(n, struct binder_buffer, rb_node);
538 BUG_ON(buffer->free);
540 if (kern_ptr < buffer)
542 else if (kern_ptr > buffer)
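/*
 * binder_update_page_range() backs (allocate != 0) or releases
 * (allocate == 0) the pages covering [start, end) of the binder mapping:
 * each page is allocated, mapped into the kernel side of the buffer with
 * map_kernel_range_noflush(), and inserted into the process's vma with
 * vm_insert_page() at the address shifted by proc->user_buffer_offset.
 * The free path walks backwards undoing the same three steps.
 */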
550 static int binder_update_page_range(struct binder_proc *proc, int allocate,
551 void *start, void *end,
552 struct vm_area_struct *vma)
555 unsigned long user_page_addr;
557 struct mm_struct *mm;
559 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
560 "%d: %s pages %pK-%pK\n", proc->pid,
561 allocate ? "allocate" : "free", start, end);
566 trace_binder_update_page_range(proc, allocate, start, end);
571 mm = get_task_mm(proc->tsk);
574 down_write(&mm->mmap_sem);
575 if (!mmget_still_valid(mm)) {
582 if (vma && mm != proc->vma_vm_mm) {
583 pr_err("%d: vma mm and task mm mismatch\n",
593 pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
598 for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
601 page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
604 *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
606 pr_err("%d: binder_alloc_buf failed for page at %pK\n",
607 proc->pid, page_addr);
608 goto err_alloc_page_failed;
610 ret = map_kernel_range_noflush((unsigned long)page_addr,
611 PAGE_SIZE, PAGE_KERNEL, page);
612 flush_cache_vmap((unsigned long)page_addr,
613 (unsigned long)page_addr + PAGE_SIZE);
615 pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
616 proc->pid, page_addr);
617 goto err_map_kernel_failed;
620 (uintptr_t)page_addr + proc->user_buffer_offset;
621 ret = vm_insert_page(vma, user_page_addr, page[0]);
623 pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
624 proc->pid, user_page_addr);
625 goto err_vm_insert_page_failed;
627 /* vm_insert_page does not seem to increment the refcount */
630 up_write(&mm->mmap_sem);
636 for (page_addr = end - PAGE_SIZE; page_addr >= start;
637 page_addr -= PAGE_SIZE) {
638 page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
640 zap_page_range(vma, (uintptr_t)page_addr +
641 proc->user_buffer_offset, PAGE_SIZE, NULL);
642 err_vm_insert_page_failed:
643 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
644 err_map_kernel_failed:
647 err_alloc_page_failed:
652 up_write(&mm->mmap_sem);
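/*
 * binder_alloc_buf(): best-fit allocator over the per-process mmap'd
 * region.  Free chunks live in proc->free_buffers ordered by size; the
 * smallest chunk that fits is taken, any remainder is split off as a new
 * free buffer, and only the pages actually needed are populated through
 * binder_update_page_range().  Async (TF_ONE_WAY) allocations are
 * additionally charged against proc->free_async_space, a separate budget
 * for one-way work.
 */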
658 static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
660 size_t offsets_size, int is_async)
662 struct rb_node *n = proc->free_buffers.rb_node;
663 struct binder_buffer *buffer;
665 struct rb_node *best_fit = NULL;
670 if (proc->vma == NULL) {
671 pr_err("%d: binder_alloc_buf, no vma\n",
676 size = ALIGN(data_size, sizeof(void *)) +
677 ALIGN(offsets_size, sizeof(void *));
679 if (size < data_size || size < offsets_size) {
680 binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
681 proc->pid, data_size, offsets_size);
686 proc->free_async_space < size + sizeof(struct binder_buffer)) {
687 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
688 "%d: binder_alloc_buf size %zd failed, no async space left\n",
694 buffer = rb_entry(n, struct binder_buffer, rb_node);
695 BUG_ON(!buffer->free);
696 buffer_size = binder_buffer_size(proc, buffer);
698 if (size < buffer_size) {
701 } else if (size > buffer_size)
708 if (best_fit == NULL) {
709 pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
714 buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
715 buffer_size = binder_buffer_size(proc, buffer);
718 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
719 "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
720 proc->pid, size, buffer, buffer_size);
723 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
725 if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
726 buffer_size = size; /* no room for other buffers */
728 buffer_size = size + sizeof(struct binder_buffer);
731 (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
732 if (end_page_addr > has_page_addr)
733 end_page_addr = has_page_addr;
734 if (binder_update_page_range(proc, 1,
735 (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
738 rb_erase(best_fit, &proc->free_buffers);
740 binder_insert_allocated_buffer(proc, buffer);
741 if (buffer_size != size) {
742 struct binder_buffer *new_buffer = (void *)buffer->data + size;
744 list_add(&new_buffer->entry, &buffer->entry);
745 new_buffer->free = 1;
746 binder_insert_free_buffer(proc, new_buffer);
748 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
749 "%d: binder_alloc_buf size %zd got %pK\n",
750 proc->pid, size, buffer);
751 buffer->data_size = data_size;
752 buffer->offsets_size = offsets_size;
753 buffer->async_transaction = is_async;
755 proc->free_async_space -= size + sizeof(struct binder_buffer);
756 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
757 "%d: binder_alloc_buf size %zd async free %zd\n",
758 proc->pid, size, proc->free_async_space);
764 static void *buffer_start_page(struct binder_buffer *buffer)
766 return (void *)((uintptr_t)buffer & PAGE_MASK);
769 static void *buffer_end_page(struct binder_buffer *buffer)
771 return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
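/*
 * buffer_start_page()/buffer_end_page() give the pages spanned by the
 * binder_buffer *header* itself.  binder_delete_free_buffer() uses them to
 * decide whether a freed buffer's header shares its first or last page with
 * a neighbouring buffer; only pages not shared with a neighbour may be
 * handed back via binder_update_page_range().
 */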
774 static void binder_delete_free_buffer(struct binder_proc *proc,
775 struct binder_buffer *buffer)
777 struct binder_buffer *prev, *next = NULL;
778 int free_page_end = 1;
779 int free_page_start = 1;
781 BUG_ON(proc->buffers.next == &buffer->entry);
782 prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
784 if (buffer_end_page(prev) == buffer_start_page(buffer)) {
786 if (buffer_end_page(prev) == buffer_end_page(buffer))
788 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
789 "%d: merge free, buffer %pK share page with %pK\n",
790 proc->pid, buffer, prev);
793 if (!list_is_last(&buffer->entry, &proc->buffers)) {
794 next = list_entry(buffer->entry.next,
795 struct binder_buffer, entry);
796 if (buffer_start_page(next) == buffer_end_page(buffer)) {
798 if (buffer_start_page(next) ==
799 buffer_start_page(buffer))
801 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
802 "%d: merge free, buffer %pK share page with %pK\n",
803 proc->pid, buffer, next);
806 list_del(&buffer->entry);
807 if (free_page_start || free_page_end) {
808 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
809 "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
810 proc->pid, buffer, free_page_start ? "" : " end",
811 free_page_end ? "" : " start", prev, next);
812 binder_update_page_range(proc, 0, free_page_start ?
813 buffer_start_page(buffer) : buffer_end_page(buffer),
814 (free_page_end ? buffer_end_page(buffer) :
815 buffer_start_page(buffer)) + PAGE_SIZE, NULL);
819 static void binder_free_buf(struct binder_proc *proc,
820 struct binder_buffer *buffer)
822 size_t size, buffer_size;
824 buffer_size = binder_buffer_size(proc, buffer);
826 size = ALIGN(buffer->data_size, sizeof(void *)) +
827 ALIGN(buffer->offsets_size, sizeof(void *));
829 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
830 "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
831 proc->pid, buffer, size, buffer_size);
833 BUG_ON(buffer->free);
834 BUG_ON(size > buffer_size);
835 BUG_ON(buffer->transaction != NULL);
836 BUG_ON((void *)buffer < proc->buffer);
837 BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
839 if (buffer->async_transaction) {
840 proc->free_async_space += size + sizeof(struct binder_buffer);
842 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
843 "%d: binder_free_buf size %zd async free %zd\n",
844 proc->pid, size, proc->free_async_space);
847 binder_update_page_range(proc, 0,
848 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
849 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
851 rb_erase(&buffer->rb_node, &proc->allocated_buffers);
853 if (!list_is_last(&buffer->entry, &proc->buffers)) {
854 struct binder_buffer *next = list_entry(buffer->entry.next,
855 struct binder_buffer, entry);
858 rb_erase(&next->rb_node, &proc->free_buffers);
859 binder_delete_free_buffer(proc, next);
862 if (proc->buffers.next != &buffer->entry) {
863 struct binder_buffer *prev = list_entry(buffer->entry.prev,
864 struct binder_buffer, entry);
867 binder_delete_free_buffer(proc, buffer);
868 rb_erase(&prev->rb_node, &proc->free_buffers);
872 binder_insert_free_buffer(proc, buffer);
875 static struct binder_node *binder_get_node(struct binder_proc *proc,
876 binder_uintptr_t ptr)
878 struct rb_node *n = proc->nodes.rb_node;
879 struct binder_node *node;
882 node = rb_entry(n, struct binder_node, rb_node);
886 else if (ptr > node->ptr)
894 static struct binder_node *binder_new_node(struct binder_proc *proc,
895 binder_uintptr_t ptr,
896 binder_uintptr_t cookie)
898 struct rb_node **p = &proc->nodes.rb_node;
899 struct rb_node *parent = NULL;
900 struct binder_node *node;
904 node = rb_entry(parent, struct binder_node, rb_node);
908 else if (ptr > node->ptr)
914 node = kzalloc(sizeof(*node), GFP_KERNEL);
917 binder_stats_created(BINDER_STAT_NODE);
918 rb_link_node(&node->rb_node, parent, p);
919 rb_insert_color(&node->rb_node, &proc->nodes);
920 node->debug_id = ++binder_last_id;
923 node->cookie = cookie;
924 node->work.type = BINDER_WORK_NODE;
925 INIT_LIST_HEAD(&node->work.entry);
926 INIT_LIST_HEAD(&node->async_todo);
927 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
928 "%d:%d node %d u%016llx c%016llx created\n",
929 proc->pid, current->pid, node->debug_id,
930 (u64)node->ptr, (u64)node->cookie);
934 static int binder_inc_node(struct binder_node *node, int strong, int internal,
935 struct list_head *target_list)
939 if (target_list == NULL &&
940 node->internal_strong_refs == 0 &&
941 !(node == binder_context_mgr_node &&
942 node->has_strong_ref)) {
943 pr_err("invalid inc strong node for %d\n",
947 node->internal_strong_refs++;
949 node->local_strong_refs++;
950 if (!node->has_strong_ref && target_list) {
951 list_del_init(&node->work.entry);
952 list_add_tail(&node->work.entry, target_list);
956 node->local_weak_refs++;
957 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
958 if (target_list == NULL) {
959 pr_err("invalid inc weak node for %d\n",
963 list_add_tail(&node->work.entry, target_list);
969 static int binder_dec_node(struct binder_node *node, int strong, int internal)
973 node->internal_strong_refs--;
975 node->local_strong_refs--;
976 if (node->local_strong_refs || node->internal_strong_refs)
980 node->local_weak_refs--;
981 if (node->local_weak_refs || !hlist_empty(&node->refs))
984 if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
985 if (list_empty(&node->work.entry)) {
986 list_add_tail(&node->work.entry, &node->proc->todo);
987 wake_up_interruptible(&node->proc->wait);
990 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
991 !node->local_weak_refs) {
992 list_del_init(&node->work.entry);
994 rb_erase(&node->rb_node, &node->proc->nodes);
995 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
996 "refless node %d deleted\n",
999 hlist_del(&node->dead_node);
1000 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1001 "dead node %d deleted\n",
1005 binder_stats_deleted(BINDER_STAT_NODE);
1013 static struct binder_ref *binder_get_ref(struct binder_proc *proc,
1014 u32 desc, bool need_strong_ref)
1016 struct rb_node *n = proc->refs_by_desc.rb_node;
1017 struct binder_ref *ref;
1020 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1022 if (desc < ref->desc) {
1024 } else if (desc > ref->desc) {
1026 } else if (need_strong_ref && !ref->strong) {
1027 binder_user_error("tried to use weak ref as strong ref\n");
1036 static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
1037 struct binder_node *node)
1040 struct rb_node **p = &proc->refs_by_node.rb_node;
1041 struct rb_node *parent = NULL;
1042 struct binder_ref *ref, *new_ref;
1046 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1048 if (node < ref->node)
1050 else if (node > ref->node)
1051 p = &(*p)->rb_right;
1055 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1056 if (new_ref == NULL)
1058 binder_stats_created(BINDER_STAT_REF);
1059 new_ref->debug_id = ++binder_last_id;
1060 new_ref->proc = proc;
1061 new_ref->node = node;
1062 rb_link_node(&new_ref->rb_node_node, parent, p);
1063 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1065 new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
1066 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1067 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1068 if (ref->desc > new_ref->desc)
1070 new_ref->desc = ref->desc + 1;
1073 p = &proc->refs_by_desc.rb_node;
1076 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1078 if (new_ref->desc < ref->desc)
1080 else if (new_ref->desc > ref->desc)
1081 p = &(*p)->rb_right;
1085 rb_link_node(&new_ref->rb_node_desc, parent, p);
1086 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1088 hlist_add_head(&new_ref->node_entry, &node->refs);
1090 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1091 "%d new ref %d desc %d for node %d\n",
1092 proc->pid, new_ref->debug_id, new_ref->desc,
1095 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1096 "%d new ref %d desc %d for dead node\n",
1097 proc->pid, new_ref->debug_id, new_ref->desc);
1102 static void binder_delete_ref(struct binder_ref *ref)
1104 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1105 "%d delete ref %d desc %d for node %d\n",
1106 ref->proc->pid, ref->debug_id, ref->desc,
1107 ref->node->debug_id);
1109 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1110 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1112 binder_dec_node(ref->node, 1, 1);
1113 hlist_del(&ref->node_entry);
1114 binder_dec_node(ref->node, 0, 1);
1116 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1117 "%d delete ref %d desc %d has death notification\n",
1118 ref->proc->pid, ref->debug_id, ref->desc);
1119 list_del(&ref->death->work.entry);
1121 binder_stats_deleted(BINDER_STAT_DEATH);
1124 binder_stats_deleted(BINDER_STAT_REF);
1127 static int binder_inc_ref(struct binder_ref *ref, int strong,
1128 struct list_head *target_list)
1133 if (ref->strong == 0) {
1134 ret = binder_inc_node(ref->node, 1, 1, target_list);
1140 if (ref->weak == 0) {
1141 ret = binder_inc_node(ref->node, 0, 1, target_list);
1151 static int binder_dec_ref(struct binder_ref *ref, int strong)
1154 if (ref->strong == 0) {
1155 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1156 ref->proc->pid, ref->debug_id,
1157 ref->desc, ref->strong, ref->weak);
1161 if (ref->strong == 0) {
1164 ret = binder_dec_node(ref->node, strong, 1);
1169 if (ref->weak == 0) {
1170 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1171 ref->proc->pid, ref->debug_id,
1172 ref->desc, ref->strong, ref->weak);
1177 if (ref->strong == 0 && ref->weak == 0)
1178 binder_delete_ref(ref);
1182 static void binder_pop_transaction(struct binder_thread *target_thread,
1183 struct binder_transaction *t)
1185 if (target_thread) {
1186 BUG_ON(target_thread->transaction_stack != t);
1187 BUG_ON(target_thread->transaction_stack->from != target_thread);
1188 target_thread->transaction_stack =
1189 target_thread->transaction_stack->from_parent;
1194 t->buffer->transaction = NULL;
1196 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1199 static void binder_send_failed_reply(struct binder_transaction *t,
1200 uint32_t error_code)
1202 struct binder_thread *target_thread;
1203 struct binder_transaction *next;
1205 BUG_ON(t->flags & TF_ONE_WAY);
1207 target_thread = t->from;
1208 if (target_thread) {
1209 if (target_thread->return_error != BR_OK &&
1210 target_thread->return_error2 == BR_OK) {
1211 target_thread->return_error2 =
1212 target_thread->return_error;
1213 target_thread->return_error = BR_OK;
1215 if (target_thread->return_error == BR_OK) {
1216 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1217 "send failed reply for transaction %d to %d:%d\n",
1219 target_thread->proc->pid,
1220 target_thread->pid);
1222 binder_pop_transaction(target_thread, t);
1223 target_thread->return_error = error_code;
1224 wake_up_interruptible(&target_thread->wait);
1226 pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
1227 target_thread->proc->pid,
1229 target_thread->return_error);
1233 next = t->from_parent;
1235 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1236 "send failed reply for transaction %d, target dead\n",
1239 binder_pop_transaction(target_thread, t);
1241 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1242 "reply failed, no target thread at root\n");
1246 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1247 "reply failed, no target thread -- retry %d\n",
1252 static void binder_transaction_buffer_release(struct binder_proc *proc,
1253 struct binder_buffer *buffer,
1254 binder_size_t *failed_at)
1256 binder_size_t *offp, *off_end;
1257 int debug_id = buffer->debug_id;
1259 binder_debug(BINDER_DEBUG_TRANSACTION,
1260 "%d buffer release %d, size %zd-%zd, failed at %pK\n",
1261 proc->pid, buffer->debug_id,
1262 buffer->data_size, buffer->offsets_size, failed_at);
1264 if (buffer->target_node)
1265 binder_dec_node(buffer->target_node, 1, 0);
1267 offp = (binder_size_t *)(buffer->data +
1268 ALIGN(buffer->data_size, sizeof(void *)));
1270 off_end = failed_at;
1272 off_end = (void *)offp + buffer->offsets_size;
1273 for (; offp < off_end; offp++) {
1274 struct flat_binder_object *fp;
1276 if (*offp > buffer->data_size - sizeof(*fp) ||
1277 buffer->data_size < sizeof(*fp) ||
1278 !IS_ALIGNED(*offp, sizeof(u32))) {
1279 pr_err("transaction release %d bad offset %lld, size %zd\n",
1280 debug_id, (u64)*offp, buffer->data_size);
1283 fp = (struct flat_binder_object *)(buffer->data + *offp);
1285 case BINDER_TYPE_BINDER:
1286 case BINDER_TYPE_WEAK_BINDER: {
1287 struct binder_node *node = binder_get_node(proc, fp->binder);
1290 pr_err("transaction release %d bad node %016llx\n",
1291 debug_id, (u64)fp->binder);
1294 binder_debug(BINDER_DEBUG_TRANSACTION,
1295 " node %d u%016llx\n",
1296 node->debug_id, (u64)node->ptr);
1297 binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
1299 case BINDER_TYPE_HANDLE:
1300 case BINDER_TYPE_WEAK_HANDLE: {
1301 struct binder_ref *ref;
1303 ref = binder_get_ref(proc, fp->handle,
1304 fp->type == BINDER_TYPE_HANDLE);
1307 pr_err("transaction release %d bad handle %d\n",
1308 debug_id, fp->handle);
1311 binder_debug(BINDER_DEBUG_TRANSACTION,
1312 " ref %d desc %d (node %d)\n",
1313 ref->debug_id, ref->desc, ref->node->debug_id);
1314 binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
1317 case BINDER_TYPE_FD:
1318 binder_debug(BINDER_DEBUG_TRANSACTION,
1319 " fd %d\n", fp->handle);
1321 task_close_fd(proc, fp->handle);
1325 pr_err("transaction release %d bad object type %x\n",
1326 debug_id, fp->type);
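/*
 * binder_transaction() is the heart of the driver.  In outline (matching
 * the code below): pick the target node/proc/thread from the handle (or
 * from the transaction being replied to), allocate a binder_buffer in the
 * target, copy the data and offsets arrays from the sender, then walk the
 * offsets translating every flat_binder_object -- local binders become
 * handles in the target, handles become binders or new handles, and fds are
 * reinstalled into the target's file table -- before queueing the work on
 * the target's todo list and waking it.  Errors unwind through the err_*
 * labels at the bottom, releasing whatever had been taken so far.
 */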
1332 static void binder_transaction(struct binder_proc *proc,
1333 struct binder_thread *thread,
1334 struct binder_transaction_data *tr, int reply)
1336 struct binder_transaction *t;
1337 struct binder_work *tcomplete;
1338 binder_size_t *offp, *off_end;
1339 struct binder_proc *target_proc;
1340 struct binder_thread *target_thread = NULL;
1341 struct binder_node *target_node = NULL;
1342 struct list_head *target_list;
1343 wait_queue_head_t *target_wait;
1344 struct binder_transaction *in_reply_to = NULL;
1345 struct binder_transaction_log_entry *e;
1346 uint32_t return_error;
1348 e = binder_transaction_log_add(&binder_transaction_log);
1349 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
1350 e->from_proc = proc->pid;
1351 e->from_thread = thread->pid;
1352 e->target_handle = tr->target.handle;
1353 e->data_size = tr->data_size;
1354 e->offsets_size = tr->offsets_size;
1357 in_reply_to = thread->transaction_stack;
1358 if (in_reply_to == NULL) {
1359 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
1360 proc->pid, thread->pid);
1361 return_error = BR_FAILED_REPLY;
1362 goto err_empty_call_stack;
1364 binder_set_nice(in_reply_to->saved_priority);
1365 if (in_reply_to->to_thread != thread) {
1366 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
1367 proc->pid, thread->pid, in_reply_to->debug_id,
1368 in_reply_to->to_proc ?
1369 in_reply_to->to_proc->pid : 0,
1370 in_reply_to->to_thread ?
1371 in_reply_to->to_thread->pid : 0);
1372 return_error = BR_FAILED_REPLY;
1374 goto err_bad_call_stack;
1376 thread->transaction_stack = in_reply_to->to_parent;
1377 target_thread = in_reply_to->from;
1378 if (target_thread == NULL) {
1379 return_error = BR_DEAD_REPLY;
1380 goto err_dead_binder;
1382 if (target_thread->transaction_stack != in_reply_to) {
1383 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
1384 proc->pid, thread->pid,
1385 target_thread->transaction_stack ?
1386 target_thread->transaction_stack->debug_id : 0,
1387 in_reply_to->debug_id);
1388 return_error = BR_FAILED_REPLY;
1390 target_thread = NULL;
1391 goto err_dead_binder;
1393 target_proc = target_thread->proc;
1395 if (tr->target.handle) {
1396 struct binder_ref *ref;
1398 ref = binder_get_ref(proc, tr->target.handle, true);
1400 binder_user_error("%d:%d got transaction to invalid handle\n",
1401 proc->pid, thread->pid);
1402 return_error = BR_FAILED_REPLY;
1403 goto err_invalid_target_handle;
1405 target_node = ref->node;
1407 target_node = binder_context_mgr_node;
1408 if (target_node == NULL) {
1409 return_error = BR_DEAD_REPLY;
1410 goto err_no_context_mgr_node;
1413 e->to_node = target_node->debug_id;
1414 target_proc = target_node->proc;
1415 if (target_proc == NULL) {
1416 return_error = BR_DEAD_REPLY;
1417 goto err_dead_binder;
1419 if (WARN_ON(proc == target_proc)) {
1420 return_error = BR_FAILED_REPLY;
1421 goto err_invalid_target_handle;
1423 if (security_binder_transaction(proc->cred,
1424 target_proc->cred) < 0) {
1425 return_error = BR_FAILED_REPLY;
1426 goto err_invalid_target_handle;
1428 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
1429 struct binder_transaction *tmp;
1431 tmp = thread->transaction_stack;
1432 if (tmp->to_thread != thread) {
1433 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
1434 proc->pid, thread->pid, tmp->debug_id,
1435 tmp->to_proc ? tmp->to_proc->pid : 0,
1437 tmp->to_thread->pid : 0);
1438 return_error = BR_FAILED_REPLY;
1439 goto err_bad_call_stack;
1442 if (tmp->from && tmp->from->proc == target_proc)
1443 target_thread = tmp->from;
1444 tmp = tmp->from_parent;
1448 if (target_thread) {
1449 e->to_thread = target_thread->pid;
1450 target_list = &target_thread->todo;
1451 target_wait = &target_thread->wait;
1453 target_list = &target_proc->todo;
1454 target_wait = &target_proc->wait;
1456 e->to_proc = target_proc->pid;
1458 /* TODO: reuse incoming transaction for reply */
1459 t = kzalloc(sizeof(*t), GFP_KERNEL);
1461 return_error = BR_FAILED_REPLY;
1462 goto err_alloc_t_failed;
1464 binder_stats_created(BINDER_STAT_TRANSACTION);
1466 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
1467 if (tcomplete == NULL) {
1468 return_error = BR_FAILED_REPLY;
1469 goto err_alloc_tcomplete_failed;
1471 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
1473 t->debug_id = ++binder_last_id;
1474 e->debug_id = t->debug_id;
1477 binder_debug(BINDER_DEBUG_TRANSACTION,
1478 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
1479 proc->pid, thread->pid, t->debug_id,
1480 target_proc->pid, target_thread->pid,
1481 (u64)tr->data.ptr.buffer,
1482 (u64)tr->data.ptr.offsets,
1483 (u64)tr->data_size, (u64)tr->offsets_size);
1485 binder_debug(BINDER_DEBUG_TRANSACTION,
1486 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
1487 proc->pid, thread->pid, t->debug_id,
1488 target_proc->pid, target_node->debug_id,
1489 (u64)tr->data.ptr.buffer,
1490 (u64)tr->data.ptr.offsets,
1491 (u64)tr->data_size, (u64)tr->offsets_size);
1493 if (!reply && !(tr->flags & TF_ONE_WAY))
1497 t->sender_euid = task_euid(proc->tsk);
1498 t->to_proc = target_proc;
1499 t->to_thread = target_thread;
1501 t->flags = tr->flags;
1502 t->priority = task_nice(current);
1504 trace_binder_transaction(reply, t, target_node);
1506 t->buffer = binder_alloc_buf(target_proc, tr->data_size,
1507 tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
1508 if (t->buffer == NULL) {
1509 return_error = BR_FAILED_REPLY;
1510 goto err_binder_alloc_buf_failed;
1512 t->buffer->allow_user_free = 0;
1513 t->buffer->debug_id = t->debug_id;
1514 t->buffer->transaction = t;
1515 t->buffer->target_node = target_node;
1516 trace_binder_transaction_alloc_buf(t->buffer);
1518 binder_inc_node(target_node, 1, 0, NULL);
1520 offp = (binder_size_t *)(t->buffer->data +
1521 ALIGN(tr->data_size, sizeof(void *)));
1523 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
1524 tr->data.ptr.buffer, tr->data_size)) {
1525 binder_user_error("%d:%d got transaction with invalid data ptr\n",
1526 proc->pid, thread->pid);
1527 return_error = BR_FAILED_REPLY;
1528 goto err_copy_data_failed;
1530 if (copy_from_user(offp, (const void __user *)(uintptr_t)
1531 tr->data.ptr.offsets, tr->offsets_size)) {
1532 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
1533 proc->pid, thread->pid);
1534 return_error = BR_FAILED_REPLY;
1535 goto err_copy_data_failed;
1537 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
1538 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
1539 proc->pid, thread->pid, (u64)tr->offsets_size);
1540 return_error = BR_FAILED_REPLY;
1541 goto err_bad_offset;
1543 off_end = (void *)offp + tr->offsets_size;
1544 for (; offp < off_end; offp++) {
1545 struct flat_binder_object *fp;
1547 if (*offp > t->buffer->data_size - sizeof(*fp) ||
1548 t->buffer->data_size < sizeof(*fp) ||
1549 !IS_ALIGNED(*offp, sizeof(u32))) {
1550 binder_user_error("%d:%d got transaction with invalid offset, %lld\n",
1551 proc->pid, thread->pid, (u64)*offp);
1552 return_error = BR_FAILED_REPLY;
1553 goto err_bad_offset;
1555 fp = (struct flat_binder_object *)(t->buffer->data + *offp);
1557 case BINDER_TYPE_BINDER:
1558 case BINDER_TYPE_WEAK_BINDER: {
1559 struct binder_ref *ref;
1560 struct binder_node *node = binder_get_node(proc, fp->binder);
1563 node = binder_new_node(proc, fp->binder, fp->cookie);
1565 return_error = BR_FAILED_REPLY;
1566 goto err_binder_new_node_failed;
1568 node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1569 node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1571 if (fp->cookie != node->cookie) {
1572 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
1573 proc->pid, thread->pid,
1574 (u64)fp->binder, node->debug_id,
1575 (u64)fp->cookie, (u64)node->cookie);
1576 return_error = BR_FAILED_REPLY;
1577 goto err_binder_get_ref_for_node_failed;
1579 if (security_binder_transfer_binder(proc->cred,
1580 target_proc->cred)) {
1581 return_error = BR_FAILED_REPLY;
1582 goto err_binder_get_ref_for_node_failed;
1584 ref = binder_get_ref_for_node(target_proc, node);
1586 return_error = BR_FAILED_REPLY;
1587 goto err_binder_get_ref_for_node_failed;
1589 if (fp->type == BINDER_TYPE_BINDER)
1590 fp->type = BINDER_TYPE_HANDLE;
1592 fp->type = BINDER_TYPE_WEAK_HANDLE;
1594 fp->handle = ref->desc;
1596 binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
1599 trace_binder_transaction_node_to_ref(t, node, ref);
1600 binder_debug(BINDER_DEBUG_TRANSACTION,
1601 " node %d u%016llx -> ref %d desc %d\n",
1602 node->debug_id, (u64)node->ptr,
1603 ref->debug_id, ref->desc);
1605 case BINDER_TYPE_HANDLE:
1606 case BINDER_TYPE_WEAK_HANDLE: {
1607 struct binder_ref *ref;
1609 ref = binder_get_ref(proc, fp->handle,
1610 fp->type == BINDER_TYPE_HANDLE);
1613 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
1615 thread->pid, fp->handle);
1616 return_error = BR_FAILED_REPLY;
1617 goto err_binder_get_ref_failed;
1619 if (security_binder_transfer_binder(proc->cred,
1620 target_proc->cred)) {
1621 return_error = BR_FAILED_REPLY;
1622 goto err_binder_get_ref_failed;
1624 if (ref->node->proc == target_proc) {
1625 if (fp->type == BINDER_TYPE_HANDLE)
1626 fp->type = BINDER_TYPE_BINDER;
1628 fp->type = BINDER_TYPE_WEAK_BINDER;
1629 fp->binder = ref->node->ptr;
1630 fp->cookie = ref->node->cookie;
1631 binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
1632 trace_binder_transaction_ref_to_node(t, ref);
1633 binder_debug(BINDER_DEBUG_TRANSACTION,
1634 " ref %d desc %d -> node %d u%016llx\n",
1635 ref->debug_id, ref->desc, ref->node->debug_id,
1636 (u64)ref->node->ptr);
1638 struct binder_ref *new_ref;
1640 new_ref = binder_get_ref_for_node(target_proc, ref->node);
1641 if (new_ref == NULL) {
1642 return_error = BR_FAILED_REPLY;
1643 goto err_binder_get_ref_for_node_failed;
1646 fp->handle = new_ref->desc;
1648 binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
1649 trace_binder_transaction_ref_to_ref(t, ref,
1651 binder_debug(BINDER_DEBUG_TRANSACTION,
1652 " ref %d desc %d -> ref %d desc %d (node %d)\n",
1653 ref->debug_id, ref->desc, new_ref->debug_id,
1654 new_ref->desc, ref->node->debug_id);
1658 case BINDER_TYPE_FD: {
1663 if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
1664 binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
1665 proc->pid, thread->pid, fp->handle);
1666 return_error = BR_FAILED_REPLY;
1667 goto err_fd_not_allowed;
1669 } else if (!target_node->accept_fds) {
1670 binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
1671 proc->pid, thread->pid, fp->handle);
1672 return_error = BR_FAILED_REPLY;
1673 goto err_fd_not_allowed;
1676 file = fget(fp->handle);
1678 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
1679 proc->pid, thread->pid, fp->handle);
1680 return_error = BR_FAILED_REPLY;
1681 goto err_fget_failed;
1683 if (security_binder_transfer_file(proc->cred,
1687 return_error = BR_FAILED_REPLY;
1688 goto err_get_unused_fd_failed;
1690 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
1691 if (target_fd < 0) {
1693 return_error = BR_FAILED_REPLY;
1694 goto err_get_unused_fd_failed;
1696 task_fd_install(target_proc, target_fd, file);
1697 trace_binder_transaction_fd(t, fp->handle, target_fd);
1698 binder_debug(BINDER_DEBUG_TRANSACTION,
1699 " fd %d -> %d\n", fp->handle, target_fd);
1702 fp->handle = target_fd;
1706 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
1707 proc->pid, thread->pid, fp->type);
1708 return_error = BR_FAILED_REPLY;
1709 goto err_bad_object_type;
1713 BUG_ON(t->buffer->async_transaction != 0);
1714 binder_pop_transaction(target_thread, in_reply_to);
1715 } else if (!(t->flags & TF_ONE_WAY)) {
1716 BUG_ON(t->buffer->async_transaction != 0);
1718 t->from_parent = thread->transaction_stack;
1719 thread->transaction_stack = t;
1721 BUG_ON(target_node == NULL);
1722 BUG_ON(t->buffer->async_transaction != 1);
1723 if (target_node->has_async_transaction) {
1724 target_list = &target_node->async_todo;
1727 target_node->has_async_transaction = 1;
1729 t->work.type = BINDER_WORK_TRANSACTION;
1730 list_add_tail(&t->work.entry, target_list);
1731 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
1732 list_add_tail(&tcomplete->entry, &thread->todo);
1734 if (reply || !(t->flags & TF_ONE_WAY))
1735 wake_up_interruptible_sync(target_wait);
1737 wake_up_interruptible(target_wait);
1741 err_get_unused_fd_failed:
1744 err_binder_get_ref_for_node_failed:
1745 err_binder_get_ref_failed:
1746 err_binder_new_node_failed:
1747 err_bad_object_type:
1749 err_copy_data_failed:
1750 trace_binder_transaction_failed_buffer_release(t->buffer);
1751 binder_transaction_buffer_release(target_proc, t->buffer, offp);
1752 t->buffer->transaction = NULL;
1753 binder_free_buf(target_proc, t->buffer);
1754 err_binder_alloc_buf_failed:
1756 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
1757 err_alloc_tcomplete_failed:
1759 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1762 err_empty_call_stack:
1764 err_invalid_target_handle:
1765 err_no_context_mgr_node:
1766 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1767 "%d:%d transaction failed %d, size %lld-%lld\n",
1768 proc->pid, thread->pid, return_error,
1769 (u64)tr->data_size, (u64)tr->offsets_size);
1772 struct binder_transaction_log_entry *fe;
1774 fe = binder_transaction_log_add(&binder_transaction_log_failed);
1778 BUG_ON(thread->return_error != BR_OK);
1780 thread->return_error = BR_TRANSACTION_COMPLETE;
1781 binder_send_failed_reply(in_reply_to, return_error);
1783 thread->return_error = return_error;
1786 static int binder_thread_write(struct binder_proc *proc,
1787 struct binder_thread *thread,
1788 binder_uintptr_t binder_buffer, size_t size,
1789 binder_size_t *consumed)
1792 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
1793 void __user *ptr = buffer + *consumed;
1794 void __user *end = buffer + size;
1796 while (ptr < end && thread->return_error == BR_OK) {
1797 if (get_user(cmd, (uint32_t __user *)ptr))
1799 ptr += sizeof(uint32_t);
1800 trace_binder_command(cmd);
1801 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
1802 binder_stats.bc[_IOC_NR(cmd)]++;
1803 proc->stats.bc[_IOC_NR(cmd)]++;
1804 thread->stats.bc[_IOC_NR(cmd)]++;
1812 struct binder_ref *ref;
1813 const char *debug_string;
1815 if (get_user(target, (uint32_t __user *)ptr))
1817 ptr += sizeof(uint32_t);
1818 if (target == 0 && binder_context_mgr_node &&
1819 (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
1820 if (binder_context_mgr_node->proc == proc) {
1821 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
1822 proc->pid, thread->pid);
1825 ref = binder_get_ref_for_node(proc,
1826 binder_context_mgr_node);
1827 if (ref->desc != target) {
1828 binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
1829 proc->pid, thread->pid,
1833 ref = binder_get_ref(proc, target,
1834 cmd == BC_ACQUIRE ||
1837 binder_user_error("%d:%d refcount change on invalid ref %d\n",
1838 proc->pid, thread->pid, target);
1843 debug_string = "IncRefs";
1844 binder_inc_ref(ref, 0, NULL);
1847 debug_string = "Acquire";
1848 binder_inc_ref(ref, 1, NULL);
1851 debug_string = "Release";
1852 binder_dec_ref(ref, 1);
1856 debug_string = "DecRefs";
1857 binder_dec_ref(ref, 0);
1860 binder_debug(BINDER_DEBUG_USER_REFS,
1861 "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
1862 proc->pid, thread->pid, debug_string, ref->debug_id,
1863 ref->desc, ref->strong, ref->weak, ref->node->debug_id);
1866 case BC_INCREFS_DONE:
1867 case BC_ACQUIRE_DONE: {
1868 binder_uintptr_t node_ptr;
1869 binder_uintptr_t cookie;
1870 struct binder_node *node;
1872 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
1874 ptr += sizeof(binder_uintptr_t);
1875 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
1877 ptr += sizeof(binder_uintptr_t);
1878 node = binder_get_node(proc, node_ptr);
1880 binder_user_error("%d:%d %s u%016llx no match\n",
1881 proc->pid, thread->pid,
1882 cmd == BC_INCREFS_DONE ?
1888 if (cookie != node->cookie) {
1889 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
1890 proc->pid, thread->pid,
1891 cmd == BC_INCREFS_DONE ?
1892 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
1893 (u64)node_ptr, node->debug_id,
1894 (u64)cookie, (u64)node->cookie);
1897 if (cmd == BC_ACQUIRE_DONE) {
1898 if (node->pending_strong_ref == 0) {
1899 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
1900 proc->pid, thread->pid,
1904 node->pending_strong_ref = 0;
1906 if (node->pending_weak_ref == 0) {
1907 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
1908 proc->pid, thread->pid,
1912 node->pending_weak_ref = 0;
1914 binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
1915 binder_debug(BINDER_DEBUG_USER_REFS,
1916 "%d:%d %s node %d ls %d lw %d\n",
1917 proc->pid, thread->pid,
1918 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
1919 node->debug_id, node->local_strong_refs, node->local_weak_refs);
1922 case BC_ATTEMPT_ACQUIRE:
1923 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
1925 case BC_ACQUIRE_RESULT:
1926 pr_err("BC_ACQUIRE_RESULT not supported\n");
1929 case BC_FREE_BUFFER: {
1930 binder_uintptr_t data_ptr;
1931 struct binder_buffer *buffer;
1933 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
1935 ptr += sizeof(binder_uintptr_t);
1937 buffer = binder_buffer_lookup(proc, data_ptr);
1938 if (buffer == NULL) {
1939 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
1940 proc->pid, thread->pid, (u64)data_ptr);
1943 if (!buffer->allow_user_free) {
1944 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
1945 proc->pid, thread->pid, (u64)data_ptr);
1948 binder_debug(BINDER_DEBUG_FREE_BUFFER,
1949 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
1950 proc->pid, thread->pid, (u64)data_ptr,
1952 buffer->transaction ? "active" : "finished");
1954 if (buffer->transaction) {
1955 buffer->transaction->buffer = NULL;
1956 buffer->transaction = NULL;
1958 if (buffer->async_transaction && buffer->target_node) {
1959 BUG_ON(!buffer->target_node->has_async_transaction);
1960 if (list_empty(&buffer->target_node->async_todo))
1961 buffer->target_node->has_async_transaction = 0;
1963 list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
1965 trace_binder_transaction_buffer_release(buffer);
1966 binder_transaction_buffer_release(proc, buffer, NULL);
1967 binder_free_buf(proc, buffer);
1971 case BC_TRANSACTION:
1973 struct binder_transaction_data tr;
1975 if (copy_from_user(&tr, ptr, sizeof(tr)))
1978 binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
1982 case BC_REGISTER_LOOPER:
1983 binder_debug(BINDER_DEBUG_THREADS,
1984 "%d:%d BC_REGISTER_LOOPER\n",
1985 proc->pid, thread->pid);
1986 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
1987 thread->looper |= BINDER_LOOPER_STATE_INVALID;
1988 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
1989 proc->pid, thread->pid);
1990 } else if (proc->requested_threads == 0) {
1991 thread->looper |= BINDER_LOOPER_STATE_INVALID;
1992 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
1993 proc->pid, thread->pid);
1995 proc->requested_threads--;
1996 proc->requested_threads_started++;
1998 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
2000 case BC_ENTER_LOOPER:
2001 binder_debug(BINDER_DEBUG_THREADS,
2002 "%d:%d BC_ENTER_LOOPER\n",
2003 proc->pid, thread->pid);
2004 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
2005 thread->looper |= BINDER_LOOPER_STATE_INVALID;
2006 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
2007 proc->pid, thread->pid);
2009 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
2011 case BC_EXIT_LOOPER:
2012 binder_debug(BINDER_DEBUG_THREADS,
2013 "%d:%d BC_EXIT_LOOPER\n",
2014 proc->pid, thread->pid);
2015 thread->looper |= BINDER_LOOPER_STATE_EXITED;
2018 case BC_REQUEST_DEATH_NOTIFICATION:
2019 case BC_CLEAR_DEATH_NOTIFICATION: {
2021 binder_uintptr_t cookie;
2022 struct binder_ref *ref;
2023 struct binder_ref_death *death;
2025 if (get_user(target, (uint32_t __user *)ptr))
2027 ptr += sizeof(uint32_t);
2028 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2030 ptr += sizeof(binder_uintptr_t);
2031 ref = binder_get_ref(proc, target, false);
2033 binder_user_error("%d:%d %s invalid ref %d\n",
2034 proc->pid, thread->pid,
2035 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2036 "BC_REQUEST_DEATH_NOTIFICATION" :
2037 "BC_CLEAR_DEATH_NOTIFICATION",
2042 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2043 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
2044 proc->pid, thread->pid,
2045 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2046 "BC_REQUEST_DEATH_NOTIFICATION" :
2047 "BC_CLEAR_DEATH_NOTIFICATION",
2048 (u64)cookie, ref->debug_id, ref->desc,
2049 ref->strong, ref->weak, ref->node->debug_id);
2051 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
2053 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
2054 proc->pid, thread->pid);
2057 death = kzalloc(sizeof(*death), GFP_KERNEL);
2058 if (death == NULL) {
2059 thread->return_error = BR_ERROR;
2060 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2061 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
2062 proc->pid, thread->pid);
2065 binder_stats_created(BINDER_STAT_DEATH);
2066 INIT_LIST_HEAD(&death->work.entry);
2067 death->cookie = cookie;
2069 if (ref->node->proc == NULL) {
2070 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
2071 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2072 list_add_tail(&ref->death->work.entry, &thread->todo);
2074 list_add_tail(&ref->death->work.entry, &proc->todo);
2075 wake_up_interruptible(&proc->wait);
2079 if (ref->death == NULL) {
2080 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
2081 proc->pid, thread->pid);
2085 if (death->cookie != cookie) {
2086 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
2087 proc->pid, thread->pid,
2093 if (list_empty(&death->work.entry)) {
2094 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2095 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2096 list_add_tail(&death->work.entry, &thread->todo);
2098 list_add_tail(&death->work.entry, &proc->todo);
2099 wake_up_interruptible(&proc->wait);
2102 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
2103 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
2107 case BC_DEAD_BINDER_DONE: {
2108 struct binder_work *w;
2109 binder_uintptr_t cookie;
2110 struct binder_ref_death *death = NULL;
2112 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2115 ptr += sizeof(cookie);
2116 list_for_each_entry(w, &proc->delivered_death, entry) {
2117 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
2119 if (tmp_death->cookie == cookie) {
2124 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2125 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
2126 proc->pid, thread->pid, (u64)cookie,
2128 if (death == NULL) {
2129 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
2130 proc->pid, thread->pid, (u64)cookie);
2134 list_del_init(&death->work.entry);
2135 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
2136 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2137 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2138 list_add_tail(&death->work.entry, &thread->todo);
2140 list_add_tail(&death->work.entry, &proc->todo);
2141 wake_up_interruptible(&proc->wait);
2147 pr_err("%d:%d unknown command %d\n",
2148 proc->pid, thread->pid, cmd);
2151 *consumed = ptr - buffer;
2156 static void binder_stat_br(struct binder_proc *proc,
2157 struct binder_thread *thread, uint32_t cmd)
2159 trace_binder_return(cmd);
2160 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
2161 binder_stats.br[_IOC_NR(cmd)]++;
2162 proc->stats.br[_IOC_NR(cmd)]++;
2163 thread->stats.br[_IOC_NR(cmd)]++;
2167 static int binder_has_proc_work(struct binder_proc *proc,
2168 struct binder_thread *thread)
2170 return !list_empty(&proc->todo) ||
2171 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2174 static int binder_has_thread_work(struct binder_thread *thread)
2176 return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
2177 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
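/*
 * These two predicates are the wait conditions used by binder_thread_read():
 * a thread looking for process-wide work sleeps until proc->todo is
 * non-empty, while a thread with its own transaction stack or todo list
 * (or a pending return_error) waits only on its private queue.  NEED_RETURN
 * is set when the driver wants the thread back in userspace (e.g. during
 * flush), so both predicates then report work available rather than block.
 */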
2180 static int binder_thread_read(struct binder_proc *proc,
2181 struct binder_thread *thread,
2182 binder_uintptr_t binder_buffer, size_t size,
2183 binder_size_t *consumed, int non_block)
2185 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
2186 void __user *ptr = buffer + *consumed;
2187 void __user *end = buffer + size;
2190 int wait_for_proc_work;
2192 if (*consumed == 0) {
2193 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
2195 ptr += sizeof(uint32_t);
2199 wait_for_proc_work = thread->transaction_stack == NULL &&
2200 list_empty(&thread->todo);
2202 if (thread->return_error != BR_OK && ptr < end) {
2203 if (thread->return_error2 != BR_OK) {
2204 if (put_user(thread->return_error2, (uint32_t __user *)ptr))
2206 ptr += sizeof(uint32_t);
2207 binder_stat_br(proc, thread, thread->return_error2);
2210 thread->return_error2 = BR_OK;
2212 if (put_user(thread->return_error, (uint32_t __user *)ptr))
2214 ptr += sizeof(uint32_t);
2215 binder_stat_br(proc, thread, thread->return_error);
2216 thread->return_error = BR_OK;
2221 thread->looper |= BINDER_LOOPER_STATE_WAITING;
2222 if (wait_for_proc_work)
2223 proc->ready_threads++;
2225 binder_unlock(__func__);
2227 trace_binder_wait_for_work(wait_for_proc_work,
2228 !!thread->transaction_stack,
2229 !list_empty(&thread->todo));
2230 if (wait_for_proc_work) {
2231 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2232 BINDER_LOOPER_STATE_ENTERED))) {
2233 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
2234 proc->pid, thread->pid, thread->looper);
2235 wait_event_interruptible(binder_user_error_wait,
2236 binder_stop_on_user_error < 2);
2238 binder_set_nice(proc->default_priority);
2239 if (non_block) {
2240 if (!binder_has_proc_work(proc, thread))
2241 ret = -EAGAIN;
2242 } else
2243 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
2244 } else {
2245 if (non_block) {
2246 if (!binder_has_thread_work(thread))
2247 ret = -EAGAIN;
2248 } else
2249 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
2252 binder_lock(__func__);
2254 if (wait_for_proc_work)
2255 proc->ready_threads--;
2256 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
2263 struct binder_transaction_data tr;
2264 struct binder_work *w;
2265 struct binder_transaction *t = NULL;
2267 if (!list_empty(&thread->todo)) {
2268 w = list_first_entry(&thread->todo, struct binder_work,
2269 entry);
2270 } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
2271 w = list_first_entry(&proc->todo, struct binder_work,
2272 entry);
2273 } else {
2274 /* no data added */
2275 if (ptr - buffer == 4 &&
2276 !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
2277 goto retry;
2278 break;
2281 if (end - ptr < sizeof(tr) + 4)
2282 break;
2284 switch (w->type) {
2285 case BINDER_WORK_TRANSACTION: {
2286 t = container_of(w, struct binder_transaction, work);
2288 case BINDER_WORK_TRANSACTION_COMPLETE: {
2289 cmd = BR_TRANSACTION_COMPLETE;
2290 if (put_user(cmd, (uint32_t __user *)ptr))
2291 return -EFAULT;
2292 ptr += sizeof(uint32_t);
2294 binder_stat_br(proc, thread, cmd);
2295 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
2296 "%d:%d BR_TRANSACTION_COMPLETE\n",
2297 proc->pid, thread->pid);
2299 list_del(&w->entry);
2300 kfree(w);
2301 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2303 case BINDER_WORK_NODE: {
2304 struct binder_node *node = container_of(w, struct binder_node, work);
2305 uint32_t cmd = BR_NOOP;
2306 const char *cmd_name;
2307 int strong = node->internal_strong_refs || node->local_strong_refs;
2308 int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
2310 if (weak && !node->has_weak_ref) {
2311 cmd = BR_INCREFS;
2312 cmd_name = "BR_INCREFS";
2313 node->has_weak_ref = 1;
2314 node->pending_weak_ref = 1;
2315 node->local_weak_refs++;
2316 } else if (strong && !node->has_strong_ref) {
2317 cmd = BR_ACQUIRE;
2318 cmd_name = "BR_ACQUIRE";
2319 node->has_strong_ref = 1;
2320 node->pending_strong_ref = 1;
2321 node->local_strong_refs++;
2322 } else if (!strong && node->has_strong_ref) {
2323 cmd = BR_RELEASE;
2324 cmd_name = "BR_RELEASE";
2325 node->has_strong_ref = 0;
2326 } else if (!weak && node->has_weak_ref) {
2327 cmd = BR_DECREFS;
2328 cmd_name = "BR_DECREFS";
2329 node->has_weak_ref = 0;
2331 if (cmd != BR_NOOP) {
2332 if (put_user(cmd, (uint32_t __user *)ptr))
2333 return -EFAULT;
2334 ptr += sizeof(uint32_t);
2335 if (put_user(node->ptr,
2336 (binder_uintptr_t __user *)ptr))
2337 return -EFAULT;
2338 ptr += sizeof(binder_uintptr_t);
2339 if (put_user(node->cookie,
2340 (binder_uintptr_t __user *)ptr))
2341 return -EFAULT;
2342 ptr += sizeof(binder_uintptr_t);
2344 binder_stat_br(proc, thread, cmd);
2345 binder_debug(BINDER_DEBUG_USER_REFS,
2346 "%d:%d %s %d u%016llx c%016llx\n",
2347 proc->pid, thread->pid, cmd_name,
2348 node->debug_id,
2349 (u64)node->ptr, (u64)node->cookie);
2351 list_del_init(&w->entry);
2352 if (!weak && !strong) {
2353 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2354 "%d:%d node %d u%016llx c%016llx deleted\n",
2355 proc->pid, thread->pid,
2359 rb_erase(&node->rb_node, &proc->nodes);
2361 binder_stats_deleted(BINDER_STAT_NODE);
2363 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2364 "%d:%d node %d u%016llx c%016llx state unchanged\n",
2365 proc->pid, thread->pid,
2372 case BINDER_WORK_DEAD_BINDER:
2373 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2374 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2375 struct binder_ref_death *death;
2378 death = container_of(w, struct binder_ref_death, work);
2379 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
2380 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
2381 else
2382 cmd = BR_DEAD_BINDER;
2383 if (put_user(cmd, (uint32_t __user *)ptr))
2384 return -EFAULT;
2385 ptr += sizeof(uint32_t);
2386 if (put_user(death->cookie,
2387 (binder_uintptr_t __user *)ptr))
2388 return -EFAULT;
2389 ptr += sizeof(binder_uintptr_t);
2390 binder_stat_br(proc, thread, cmd);
2391 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2392 "%d:%d %s %016llx\n",
2393 proc->pid, thread->pid,
2394 cmd == BR_DEAD_BINDER ?
2395 "BR_DEAD_BINDER" :
2396 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
2397 (u64)death->cookie);
2399 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
2400 list_del(&w->entry);
2401 kfree(death);
2402 binder_stats_deleted(BINDER_STAT_DEATH);
2403 } else
2404 list_move(&w->entry, &proc->delivered_death);
2405 if (cmd == BR_DEAD_BINDER)
2406 goto done; /* DEAD_BINDER notifications can cause transactions */
2411 if (!t)
2412 continue;
2413 BUG_ON(t->buffer == NULL);
2414 if (t->buffer->target_node) {
2415 struct binder_node *target_node = t->buffer->target_node;
2417 tr.target.ptr = target_node->ptr;
2418 tr.cookie = target_node->cookie;
2419 t->saved_priority = task_nice(current);
2420 if (t->priority < target_node->min_priority &&
2421 !(t->flags & TF_ONE_WAY))
2422 binder_set_nice(t->priority);
2423 else if (!(t->flags & TF_ONE_WAY) ||
2424 t->saved_priority > target_node->min_priority)
2425 binder_set_nice(target_node->min_priority);
2426 cmd = BR_TRANSACTION;
2427 } else {
2428 tr.target.ptr = 0;
2429 tr.cookie = 0;
2430 cmd = BR_REPLY;
2431 }
2432 tr.code = t->code;
2433 tr.flags = t->flags;
2434 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
2436 if (t->from) {
2437 struct task_struct *sender = t->from->proc->tsk;
2439 tr.sender_pid = task_tgid_nr_ns(sender,
2440 task_active_pid_ns(current));
2441 } else {
2442 tr.sender_pid = 0;
2443 }
2445 tr.data_size = t->buffer->data_size;
2446 tr.offsets_size = t->buffer->offsets_size;
2447 tr.data.ptr.buffer = (binder_uintptr_t)(
2448 (uintptr_t)t->buffer->data +
2449 proc->user_buffer_offset);
2450 tr.data.ptr.offsets = tr.data.ptr.buffer +
2451 ALIGN(t->buffer->data_size,
2452 sizeof(void *));
2454 if (put_user(cmd, (uint32_t __user *)ptr))
2455 return -EFAULT;
2456 ptr += sizeof(uint32_t);
2457 if (copy_to_user(ptr, &tr, sizeof(tr)))
2458 return -EFAULT;
2459 ptr += sizeof(tr);
2461 trace_binder_transaction_received(t);
2462 binder_stat_br(proc, thread, cmd);
2463 binder_debug(BINDER_DEBUG_TRANSACTION,
2464 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
2465 proc->pid, thread->pid,
2466 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
2467 "BR_REPLY",
2468 t->debug_id, t->from ? t->from->proc->pid : 0,
2469 t->from ? t->from->pid : 0, cmd,
2470 t->buffer->data_size, t->buffer->offsets_size,
2471 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
2473 list_del(&t->work.entry);
2474 t->buffer->allow_user_free = 1;
2475 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
2476 t->to_parent = thread->transaction_stack;
2477 t->to_thread = thread;
2478 thread->transaction_stack = t;
2479 } else {
2480 t->buffer->transaction = NULL;
2481 kfree(t);
2482 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2487 done:
2489 *consumed = ptr - buffer;
2490 if (proc->requested_threads + proc->ready_threads == 0 &&
2491 proc->requested_threads_started < proc->max_threads &&
2492 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2493 BINDER_LOOPER_STATE_ENTERED))
2494 /* the user-space code fails to spawn a new thread if we leave this out */) {
2495 proc->requested_threads++;
2496 binder_debug(BINDER_DEBUG_THREADS,
2497 "%d:%d BR_SPAWN_LOOPER\n",
2498 proc->pid, thread->pid);
2499 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
2500 return -EFAULT;
2501 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
2502 }
2503 return 0;
2504 }
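/*
 * Illustrative sketch (not part of the driver): the matching user-space
 * read loop for the BR_* stream that binder_thread_read() produces above.
 * A read from offset 0 always starts with BR_NOOP; BR_SPAWN_LOOPER asks
 * the process to start another thread, which should announce itself with
 * BC_REGISTER_LOOPER. Only a few return codes are handled here.
 */
#if 0	/* user-space example */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static void drain_returns(int fd)
{
	uint8_t rbuf[256];
	uint8_t *p, *end;
	struct binder_write_read bwr;

	memset(&bwr, 0, sizeof(bwr));
	bwr.read_buffer = (binder_uintptr_t)(uintptr_t)rbuf;
	bwr.read_size = sizeof(rbuf);
	if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
		return;

	p = rbuf;
	end = rbuf + bwr.read_consumed;
	while (p + sizeof(uint32_t) <= end) {
		uint32_t cmd;

		memcpy(&cmd, p, sizeof(cmd));
		p += sizeof(cmd);
		switch (cmd) {
		case BR_NOOP:
		case BR_TRANSACTION_COMPLETE:
			break;		/* nothing to do */
		case BR_SPAWN_LOOPER:
			/* start a new thread; it sends BC_REGISTER_LOOPER */
			break;
		case BR_TRANSACTION: {
			struct binder_transaction_data tr;

			if (end - p < (long)sizeof(tr))
				return;
			memcpy(&tr, p, sizeof(tr));
			p += sizeof(tr);
			/* dispatch tr, then BC_FREE_BUFFER + BC_REPLY */
			break;
		}
		default:
			return;		/* unhandled return code */
		}
	}
}
#endif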
2506 static void binder_release_work(struct list_head *list)
2508 struct binder_work *w;
2510 while (!list_empty(list)) {
2511 w = list_first_entry(list, struct binder_work, entry);
2512 list_del_init(&w->entry);
2513 switch (w->type) {
2514 case BINDER_WORK_TRANSACTION: {
2515 struct binder_transaction *t;
2517 t = container_of(w, struct binder_transaction, work);
2518 if (t->buffer->target_node &&
2519 !(t->flags & TF_ONE_WAY)) {
2520 binder_send_failed_reply(t, BR_DEAD_REPLY);
2521 } else {
2522 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2523 "undelivered transaction %d\n",
2525 t->buffer->transaction = NULL;
2526 kfree(t);
2527 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2530 case BINDER_WORK_TRANSACTION_COMPLETE: {
2531 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2532 "undelivered TRANSACTION_COMPLETE\n");
2534 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2536 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2537 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2538 struct binder_ref_death *death;
2540 death = container_of(w, struct binder_ref_death, work);
2541 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2542 "undelivered death notification, %016llx\n",
2543 (u64)death->cookie);
2544 kfree(death);
2545 binder_stats_deleted(BINDER_STAT_DEATH);
2548 pr_err("unexpected work type, %d, not freed\n",
2556 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
2558 struct binder_thread *thread = NULL;
2559 struct rb_node *parent = NULL;
2560 struct rb_node **p = &proc->threads.rb_node;
2562 while (*p) {
2563 parent = *p;
2564 thread = rb_entry(parent, struct binder_thread, rb_node);
2566 if (current->pid < thread->pid)
2567 p = &(*p)->rb_left;
2568 else if (current->pid > thread->pid)
2569 p = &(*p)->rb_right;
2570 else
2571 break;
2572 }
2573 if (*p == NULL) {
2574 thread = kzalloc(sizeof(*thread), GFP_KERNEL);
2575 if (thread == NULL)
2576 return NULL;
2577 binder_stats_created(BINDER_STAT_THREAD);
2578 thread->proc = proc;
2579 thread->pid = current->pid;
2580 init_waitqueue_head(&thread->wait);
2581 INIT_LIST_HEAD(&thread->todo);
2582 rb_link_node(&thread->rb_node, parent, p);
2583 rb_insert_color(&thread->rb_node, &proc->threads);
2584 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
2585 thread->return_error = BR_OK;
2586 thread->return_error2 = BR_OK;
2587 }
2588 return thread;
2589 }
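/*
 * binder_get_thread() is a lookup-or-insert on the per-process red-black
 * tree of threads, keyed by the calling task's pid. It runs under the
 * global binder lock, so the walk and the rb_link_node()/rb_insert_color()
 * insertion cannot race with one another.
 */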
2591 static int binder_free_thread(struct binder_proc *proc,
2592 struct binder_thread *thread)
2594 struct binder_transaction *t;
2595 struct binder_transaction *send_reply = NULL;
2596 int active_transactions = 0;
2598 rb_erase(&thread->rb_node, &proc->threads);
2599 t = thread->transaction_stack;
2600 if (t && t->to_thread == thread)
2601 send_reply = t;
2602 while (t) {
2603 active_transactions++;
2604 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2605 "release %d:%d transaction %d %s, still active\n",
2606 proc->pid, thread->pid,
2607 t->debug_id,
2608 (t->to_thread == thread) ? "in" : "out");
2610 if (t->to_thread == thread) {
2611 t->to_proc = NULL;
2612 t->to_thread = NULL;
2613 if (t->buffer) {
2614 t->buffer->transaction = NULL;
2615 t->buffer = NULL;
2616 }
2617 t = t->to_parent;
2618 } else if (t->from == thread) {
2619 t->from = NULL;
2620 t = t->from_parent;
2621 } else
2622 BUG();
2623 }
2625 /*
2626 * If this thread used poll, make sure we remove the waitqueue from any
2627 * poll data structures holding it.
2628 */
2629 if (thread->looper & BINDER_LOOPER_STATE_POLL)
2630 wake_up_pollfree(&thread->wait);
2632 /*
2633 * This is needed to avoid races between wake_up_pollfree() above and
2634 * someone else removing the last entry from the queue for other reasons
2635 * (e.g. ep_remove_wait_queue() being called due to an epoll file
2636 * descriptor being closed). Such other users hold an RCU read lock, so
2637 * we can be sure they're done after we call synchronize_rcu().
2638 */
2639 if (thread->looper & BINDER_LOOPER_STATE_POLL)
2640 synchronize_rcu();
2642 if (send_reply)
2643 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
2644 binder_release_work(&thread->todo);
2645 kfree(thread);
2646 binder_stats_deleted(BINDER_STAT_THREAD);
2647 return active_transactions;
2650 static unsigned int binder_poll(struct file *filp,
2651 struct poll_table_struct *wait)
2653 struct binder_proc *proc = filp->private_data;
2654 struct binder_thread *thread = NULL;
2655 int wait_for_proc_work;
2657 binder_lock(__func__);
2659 thread = binder_get_thread(proc);
2660 if (thread == NULL) {
2661 binder_unlock(__func__);
2662 return POLLERR;
2663 }
2665 thread->looper |= BINDER_LOOPER_STATE_POLL;
2667 wait_for_proc_work = thread->transaction_stack == NULL &&
2668 list_empty(&thread->todo) && thread->return_error == BR_OK;
2670 binder_unlock(__func__);
2672 if (wait_for_proc_work) {
2673 if (binder_has_proc_work(proc, thread))
2674 return POLLIN;
2675 poll_wait(filp, &proc->wait, wait);
2676 if (binder_has_proc_work(proc, thread))
2677 return POLLIN;
2678 } else {
2679 if (binder_has_thread_work(thread))
2680 return POLLIN;
2681 poll_wait(filp, &thread->wait, wait);
2682 if (binder_has_thread_work(thread))
2683 return POLLIN;
2684 }
2685 return 0;
2686 }
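/*
 * Illustrative sketch (not part of the driver): waiting for binder work
 * with poll(2) instead of a blocking read. POLLIN means a subsequent
 * BINDER_WRITE_READ with a non-zero read_size would find work queued.
 */
#if 0	/* user-space example */
#include <poll.h>

static int wait_for_binder_work(int fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	return poll(&pfd, 1, timeout_ms) == 1 &&
		(pfd.revents & POLLIN);		/* 1 if work is pending */
}
#endif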
2688 static int binder_ioctl_write_read(struct file *filp,
2689 unsigned int cmd, unsigned long arg,
2690 struct binder_thread *thread)
2692 int ret = 0;
2693 struct binder_proc *proc = filp->private_data;
2694 unsigned int size = _IOC_SIZE(cmd);
2695 void __user *ubuf = (void __user *)arg;
2696 struct binder_write_read bwr;
2698 if (size != sizeof(struct binder_write_read)) {
2699 ret = -EINVAL;
2700 goto out;
2701 }
2702 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
2703 ret = -EFAULT;
2704 goto out;
2705 }
2706 binder_debug(BINDER_DEBUG_READ_WRITE,
2707 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
2708 proc->pid, thread->pid,
2709 (u64)bwr.write_size, (u64)bwr.write_buffer,
2710 (u64)bwr.read_size, (u64)bwr.read_buffer);
2712 if (bwr.write_size > 0) {
2713 ret = binder_thread_write(proc, thread,
2714 bwr.write_buffer,
2715 bwr.write_size,
2716 &bwr.write_consumed);
2717 trace_binder_write_done(ret);
2718 if (ret < 0) {
2719 bwr.read_consumed = 0;
2720 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
2721 ret = -EFAULT;
2722 goto out;
2723 }
2725 if (bwr.read_size > 0) {
2726 ret = binder_thread_read(proc, thread, bwr.read_buffer,
2727 bwr.read_size,
2728 &bwr.read_consumed,
2729 filp->f_flags & O_NONBLOCK);
2730 trace_binder_read_done(ret);
2731 if (!list_empty(&proc->todo))
2732 wake_up_interruptible(&proc->wait);
2733 if (ret < 0) {
2734 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
2735 ret = -EFAULT;
2736 goto out;
2737 }
2739 binder_debug(BINDER_DEBUG_READ_WRITE,
2740 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
2741 proc->pid, thread->pid,
2742 (u64)bwr.write_consumed, (u64)bwr.write_size,
2743 (u64)bwr.read_consumed, (u64)bwr.read_size);
2744 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
2745 ret = -EFAULT;
2746 goto out;
2747 }
2748 out:
2749 return ret;
2750 }
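/*
 * Note that bwr is copied back to user space even on failure (with
 * read_consumed forced to 0 after a write error), so the caller always
 * learns how much of each buffer the driver actually consumed.
 */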
2752 static int binder_ioctl_set_ctx_mgr(struct file *filp)
2755 struct binder_proc *proc = filp->private_data;
2756 kuid_t curr_euid = current_euid();
2758 if (binder_context_mgr_node != NULL) {
2759 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
2763 ret = security_binder_set_context_mgr(proc->cred);
2764 if (ret < 0)
2765 goto out;
2766 if (uid_valid(binder_context_mgr_uid)) {
2767 if (!uid_eq(binder_context_mgr_uid, curr_euid)) {
2768 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
2769 from_kuid(&init_user_ns, curr_euid),
2770 from_kuid(&init_user_ns,
2771 binder_context_mgr_uid));
2772 ret = -EPERM;
2773 goto out;
2774 }
2775 } else {
2776 binder_context_mgr_uid = curr_euid;
2777 }
2778 binder_context_mgr_node = binder_new_node(proc, 0, 0);
2779 if (binder_context_mgr_node == NULL) {
2780 ret = -ENOMEM;
2781 goto out;
2782 }
2783 binder_context_mgr_node->local_weak_refs++;
2784 binder_context_mgr_node->local_strong_refs++;
2785 binder_context_mgr_node->has_strong_ref = 1;
2786 binder_context_mgr_node->has_weak_ref = 1;
2787 out:
2788 return ret;
2789 }
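/*
 * Illustrative sketch (not part of the driver): how a service manager
 * claims the context-manager slot guarded above. Only the first caller
 * that passes the security hook and euid check succeeds; handle 0 then
 * refers to this node for every other client of the device.
 */
#if 0	/* user-space example */
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int become_context_manager(int fd)
{
	return ioctl(fd, BINDER_SET_CONTEXT_MGR, 0);	/* 0 on success */
}
#endif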
2791 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2793 int ret;
2794 struct binder_proc *proc = filp->private_data;
2795 struct binder_thread *thread;
2796 unsigned int size = _IOC_SIZE(cmd);
2797 void __user *ubuf = (void __user *)arg;
2799 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
2800 proc->pid, current->pid, cmd, arg);*/
2802 trace_binder_ioctl(cmd, arg);
2804 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
2805 if (ret)
2806 goto err_unlocked;
2808 binder_lock(__func__);
2809 thread = binder_get_thread(proc);
2810 if (thread == NULL) {
2811 ret = -ENOMEM;
2812 goto err;
2813 }
2815 switch (cmd) {
2816 case BINDER_WRITE_READ:
2817 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
2818 if (ret)
2819 goto err;
2820 break;
2821 case BINDER_SET_MAX_THREADS:
2822 if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
2823 ret = -EINVAL;
2824 goto err;
2825 }
2826 break;
2827 case BINDER_SET_CONTEXT_MGR:
2828 ret = binder_ioctl_set_ctx_mgr(filp);
2829 if (ret)
2830 goto err;
2831 break;
2832 case BINDER_THREAD_EXIT:
2833 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
2834 proc->pid, thread->pid);
2835 binder_free_thread(proc, thread);
2836 thread = NULL;
2837 break;
2838 case BINDER_VERSION: {
2839 struct binder_version __user *ver = ubuf;
2841 if (size != sizeof(struct binder_version)) {
2842 ret = -EINVAL;
2843 goto err;
2844 }
2845 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
2846 &ver->protocol_version)) {
2847 ret = -EINVAL;
2848 goto err;
2849 }
2850 break;
2851 }
2852 default:
2853 ret = -EINVAL;
2854 goto err;
2855 }
2856 ret = 0;
2857 err:
2858 if (thread)
2859 thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
2860 binder_unlock(__func__);
2861 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
2862 if (ret && ret != -ERESTARTSYS)
2863 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
2864 err_unlocked:
2865 trace_binder_ioctl_done(ret);
2866 return ret;
2867 }
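/*
 * Illustrative sketch (not part of the driver): the usual open-time
 * handshake against binder_ioctl() above: check the protocol version,
 * then cap how many extra looper threads the driver may request.
 */
#if 0	/* user-space example */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int binder_handshake(int fd, uint32_t max_threads)
{
	struct binder_version vers;

	if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
		return -1;
	return ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads);
}
#endif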
2869 static void binder_vma_open(struct vm_area_struct *vma)
2871 struct binder_proc *proc = vma->vm_private_data;
2873 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2874 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2875 proc->pid, vma->vm_start, vma->vm_end,
2876 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2877 (unsigned long)pgprot_val(vma->vm_page_prot));
2880 static void binder_vma_close(struct vm_area_struct *vma)
2882 struct binder_proc *proc = vma->vm_private_data;
2884 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2885 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2886 proc->pid, vma->vm_start, vma->vm_end,
2887 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2888 (unsigned long)pgprot_val(vma->vm_page_prot));
2890 proc->vma_vm_mm = NULL;
2891 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
2894 static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2896 return VM_FAULT_SIGBUS;
2899 static const struct vm_operations_struct binder_vm_ops = {
2900 .open = binder_vma_open,
2901 .close = binder_vma_close,
2902 .fault = binder_vm_fault,
2905 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
2908 struct vm_struct *area;
2909 struct binder_proc *proc = filp->private_data;
2910 const char *failure_string;
2911 struct binder_buffer *buffer;
2913 if (proc->tsk != current->group_leader)
2914 return -EINVAL;
2916 if ((vma->vm_end - vma->vm_start) > SZ_4M)
2917 vma->vm_end = vma->vm_start + SZ_4M;
2919 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2920 "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
2921 proc->pid, vma->vm_start, vma->vm_end,
2922 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2923 (unsigned long)pgprot_val(vma->vm_page_prot));
2925 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
2927 failure_string = "bad vm_flags";
2930 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
2932 mutex_lock(&binder_mmap_lock);
2935 failure_string = "already mapped";
2936 goto err_already_mapped;
2939 area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
2942 failure_string = "get_vm_area";
2943 goto err_get_vm_area_failed;
2945 proc->buffer = area->addr;
2946 proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
2947 mutex_unlock(&binder_mmap_lock);
2949 #ifdef CONFIG_CPU_CACHE_VIPT
2950 if (cache_is_vipt_aliasing()) {
2951 while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
2952 pr_info("binder_mmap: %d %lx-%lx maps %pK bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
2953 vma->vm_start += PAGE_SIZE;
2957 proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
2958 if (proc->pages == NULL) {
2960 failure_string = "alloc page array";
2961 goto err_alloc_pages_failed;
2963 proc->buffer_size = vma->vm_end - vma->vm_start;
2965 vma->vm_ops = &binder_vm_ops;
2966 vma->vm_private_data = proc;
2968 if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
2970 failure_string = "alloc small buf";
2971 goto err_alloc_small_buf_failed;
2973 buffer = proc->buffer;
2974 INIT_LIST_HEAD(&proc->buffers);
2975 list_add(&buffer->entry, &proc->buffers);
2977 binder_insert_free_buffer(proc, buffer);
2978 proc->free_async_space = proc->buffer_size / 2;
2980 proc->files = get_files_struct(current);
2982 proc->vma_vm_mm = vma->vm_mm;
2984 /*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
2985 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
2986 return 0;
2988 err_alloc_small_buf_failed:
2991 err_alloc_pages_failed:
2992 mutex_lock(&binder_mmap_lock);
2993 vfree(proc->buffer);
2994 proc->buffer = NULL;
2995 err_get_vm_area_failed:
2996 err_already_mapped:
2997 mutex_unlock(&binder_mmap_lock);
2999 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
3000 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
3001 return ret;
3002 }
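/*
 * Illustrative sketch (not part of the driver): mapping the buffer space
 * that binder_mmap() above backs. The mapping must be read-only for user
 * space (VM_WRITE is in FORBIDDEN_MMAP_FLAGS) and anything beyond SZ_4M
 * is silently clipped; 1 MB is a typical request.
 */
#if 0	/* user-space example */
#include <stddef.h>
#include <fcntl.h>
#include <sys/mman.h>

static void *map_binder(int *fdp)
{
	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);

	if (fd < 0)
		return MAP_FAILED;
	*fdp = fd;
	return mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE, fd, 0);
}
#endif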
3004 static int binder_open(struct inode *nodp, struct file *filp)
3006 struct binder_proc *proc;
3008 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
3009 current->group_leader->pid, current->pid);
3011 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
3012 if (proc == NULL)
3013 return -ENOMEM;
3014 get_task_struct(current->group_leader);
3015 proc->tsk = current->group_leader;
3016 proc->cred = get_cred(filp->f_cred);
3017 INIT_LIST_HEAD(&proc->todo);
3018 init_waitqueue_head(&proc->wait);
3019 proc->default_priority = task_nice(current);
3021 binder_lock(__func__);
3023 binder_stats_created(BINDER_STAT_PROC);
3024 hlist_add_head(&proc->proc_node, &binder_procs);
3025 proc->pid = current->group_leader->pid;
3026 INIT_LIST_HEAD(&proc->delivered_death);
3027 filp->private_data = proc;
3029 binder_unlock(__func__);
3031 if (binder_debugfs_dir_entry_proc) {
3034 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
3035 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
3036 binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
3042 static int binder_flush(struct file *filp, fl_owner_t id)
3044 struct binder_proc *proc = filp->private_data;
3046 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
3051 static void binder_deferred_flush(struct binder_proc *proc)
3056 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
3057 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
3059 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
3060 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
3061 wake_up_interruptible(&thread->wait);
3065 wake_up_interruptible_all(&proc->wait);
3067 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3068 "binder_flush: %d woke %d threads\n", proc->pid,
3072 static int binder_release(struct inode *nodp, struct file *filp)
3074 struct binder_proc *proc = filp->private_data;
3076 debugfs_remove(proc->debugfs_entry);
3077 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
3082 static int binder_node_release(struct binder_node *node, int refs)
3084 struct binder_ref *ref;
3087 list_del_init(&node->work.entry);
3088 binder_release_work(&node->async_todo);
3090 if (hlist_empty(&node->refs)) {
3091 kfree(node);
3092 binder_stats_deleted(BINDER_STAT_NODE);
3094 return refs;
3095 }
3097 node->proc = NULL;
3098 node->local_strong_refs = 0;
3099 node->local_weak_refs = 0;
3100 hlist_add_head(&node->dead_node, &binder_dead_nodes);
3102 hlist_for_each_entry(ref, &node->refs, node_entry) {
3103 refs++;
3105 if (!ref->death)
3106 continue;
3108 death++;
3110 if (list_empty(&ref->death->work.entry)) {
3111 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3112 list_add_tail(&ref->death->work.entry,
3114 wake_up_interruptible(&ref->proc->wait);
3119 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3120 "node %d now dead, refs %d, death %d\n",
3121 node->debug_id, refs, death);
3126 static void binder_deferred_release(struct binder_proc *proc)
3128 struct binder_transaction *t;
3130 int threads, nodes, incoming_refs, outgoing_refs, buffers,
3131 active_transactions, page_count;
3134 BUG_ON(proc->files);
3136 hlist_del(&proc->proc_node);
3138 if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
3139 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3140 "%s: %d context_mgr_node gone\n",
3141 __func__, proc->pid);
3142 binder_context_mgr_node = NULL;
3146 active_transactions = 0;
3147 while ((n = rb_first(&proc->threads))) {
3148 struct binder_thread *thread;
3150 thread = rb_entry(n, struct binder_thread, rb_node);
3152 active_transactions += binder_free_thread(proc, thread);
3157 while ((n = rb_first(&proc->nodes))) {
3158 struct binder_node *node;
3160 node = rb_entry(n, struct binder_node, rb_node);
3162 rb_erase(&node->rb_node, &proc->nodes);
3163 incoming_refs = binder_node_release(node, incoming_refs);
3167 while ((n = rb_first(&proc->refs_by_desc))) {
3168 struct binder_ref *ref;
3170 ref = rb_entry(n, struct binder_ref, rb_node_desc);
3172 binder_delete_ref(ref);
3175 binder_release_work(&proc->todo);
3176 binder_release_work(&proc->delivered_death);
3179 while ((n = rb_first(&proc->allocated_buffers))) {
3180 struct binder_buffer *buffer;
3182 buffer = rb_entry(n, struct binder_buffer, rb_node);
3184 t = buffer->transaction;
3185 if (t) {
3186 t->buffer = NULL;
3187 buffer->transaction = NULL;
3188 pr_err("release proc %d, transaction %d, not freed\n",
3189 proc->pid, t->debug_id);
3193 binder_free_buf(proc, buffer);
3197 binder_stats_deleted(BINDER_STAT_PROC);
3203 for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
3206 if (!proc->pages[i])
3207 continue;
3209 page_addr = proc->buffer + i * PAGE_SIZE;
3210 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
3211 "%s: %d: page %d at %pK not freed\n",
3212 __func__, proc->pid, i, page_addr);
3213 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
3214 __free_page(proc->pages[i]);
3218 vfree(proc->buffer);
3221 put_task_struct(proc->tsk);
3222 put_cred(proc->cred);
3224 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3225 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
3226 __func__, proc->pid, threads, nodes, incoming_refs,
3227 outgoing_refs, active_transactions, buffers, page_count);
3232 static void binder_deferred_func(struct work_struct *work)
3234 struct binder_proc *proc;
3235 struct files_struct *files;
3240 binder_lock(__func__);
3241 mutex_lock(&binder_deferred_lock);
3242 if (!hlist_empty(&binder_deferred_list)) {
3243 proc = hlist_entry(binder_deferred_list.first,
3244 struct binder_proc, deferred_work_node);
3245 hlist_del_init(&proc->deferred_work_node);
3246 defer = proc->deferred_work;
3247 proc->deferred_work = 0;
3252 mutex_unlock(&binder_deferred_lock);
3255 if (defer & BINDER_DEFERRED_PUT_FILES) {
3256 files = proc->files;
3261 if (defer & BINDER_DEFERRED_FLUSH)
3262 binder_deferred_flush(proc);
3264 if (defer & BINDER_DEFERRED_RELEASE)
3265 binder_deferred_release(proc); /* frees proc */
3267 binder_unlock(__func__);
3268 if (files)
3269 put_files_struct(files);
3272 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
3274 static void
3275 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
3277 mutex_lock(&binder_deferred_lock);
3278 proc->deferred_work |= defer;
3279 if (hlist_unhashed(&proc->deferred_work_node)) {
3280 hlist_add_head(&proc->deferred_work_node,
3281 &binder_deferred_list);
3282 queue_work(binder_deferred_workqueue, &binder_deferred_work);
3284 mutex_unlock(&binder_deferred_lock);
3285 }
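/*
 * Deferred work is coalesced: each proc carries a bitmask of pending
 * BINDER_DEFERRED_* actions and sits on binder_deferred_list at most
 * once, and the single-threaded workqueue drains one proc per pass, so
 * requesting an already-pending action is just an idempotent flag OR.
 */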
3287 static void print_binder_transaction(struct seq_file *m, const char *prefix,
3288 struct binder_transaction *t)
3291 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
3292 prefix, t->debug_id, t,
3293 t->from ? t->from->proc->pid : 0,
3294 t->from ? t->from->pid : 0,
3295 t->to_proc ? t->to_proc->pid : 0,
3296 t->to_thread ? t->to_thread->pid : 0,
3297 t->code, t->flags, t->priority, t->need_reply);
3298 if (t->buffer == NULL) {
3299 seq_puts(m, " buffer free\n");
3302 if (t->buffer->target_node)
3303 seq_printf(m, " node %d",
3304 t->buffer->target_node->debug_id);
3305 seq_printf(m, " size %zd:%zd data %pK\n",
3306 t->buffer->data_size, t->buffer->offsets_size,
3310 static void print_binder_buffer(struct seq_file *m, const char *prefix,
3311 struct binder_buffer *buffer)
3313 seq_printf(m, "%s %d: %pK size %zd:%zd %s\n",
3314 prefix, buffer->debug_id, buffer->data,
3315 buffer->data_size, buffer->offsets_size,
3316 buffer->transaction ? "active" : "delivered");
3319 static void print_binder_work(struct seq_file *m, const char *prefix,
3320 const char *transaction_prefix,
3321 struct binder_work *w)
3323 struct binder_node *node;
3324 struct binder_transaction *t;
3326 switch (w->type) {
3327 case BINDER_WORK_TRANSACTION:
3328 t = container_of(w, struct binder_transaction, work);
3329 print_binder_transaction(m, transaction_prefix, t);
3331 case BINDER_WORK_TRANSACTION_COMPLETE:
3332 seq_printf(m, "%stransaction complete\n", prefix);
3334 case BINDER_WORK_NODE:
3335 node = container_of(w, struct binder_node, work);
3336 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
3337 prefix, node->debug_id,
3338 (u64)node->ptr, (u64)node->cookie);
3340 case BINDER_WORK_DEAD_BINDER:
3341 seq_printf(m, "%shas dead binder\n", prefix);
3343 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3344 seq_printf(m, "%shas cleared dead binder\n", prefix);
3346 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
3347 seq_printf(m, "%shas cleared death notification\n", prefix);
3350 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
3355 static void print_binder_thread(struct seq_file *m,
3356 struct binder_thread *thread,
3359 struct binder_transaction *t;
3360 struct binder_work *w;
3361 size_t start_pos = m->count;
3362 size_t header_pos;
3364 seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
3365 header_pos = m->count;
3366 t = thread->transaction_stack;
3368 if (t->from == thread) {
3369 print_binder_transaction(m,
3370 " outgoing transaction", t);
3372 } else if (t->to_thread == thread) {
3373 print_binder_transaction(m,
3374 " incoming transaction", t);
3377 print_binder_transaction(m, " bad transaction", t);
3381 list_for_each_entry(w, &thread->todo, entry) {
3382 print_binder_work(m, " ", " pending transaction", w);
3384 if (!print_always && m->count == header_pos)
3385 m->count = start_pos;
3388 static void print_binder_node(struct seq_file *m, struct binder_node *node)
3390 struct binder_ref *ref;
3391 struct binder_work *w;
3392 int count;
3394 count = 0;
3395 hlist_for_each_entry(ref, &node->refs, node_entry)
3396 count++;
3398 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
3399 node->debug_id, (u64)node->ptr, (u64)node->cookie,
3400 node->has_strong_ref, node->has_weak_ref,
3401 node->local_strong_refs, node->local_weak_refs,
3402 node->internal_strong_refs, count);
3404 seq_puts(m, " proc");
3405 hlist_for_each_entry(ref, &node->refs, node_entry)
3406 seq_printf(m, " %d", ref->proc->pid);
3409 list_for_each_entry(w, &node->async_todo, entry)
3410 print_binder_work(m, " ",
3411 " pending async transaction", w);
3414 static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
3416 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
3417 ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
3418 ref->node->debug_id, ref->strong, ref->weak, ref->death);
3421 static void print_binder_proc(struct seq_file *m,
3422 struct binder_proc *proc, int print_all)
3424 struct binder_work *w;
3425 struct rb_node *n;
3426 size_t start_pos = m->count;
3427 size_t header_pos;
3429 seq_printf(m, "proc %d\n", proc->pid);
3430 header_pos = m->count;
3432 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3433 print_binder_thread(m, rb_entry(n, struct binder_thread,
3434 rb_node), print_all);
3435 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
3436 struct binder_node *node = rb_entry(n, struct binder_node,
3438 if (print_all || node->has_async_transaction)
3439 print_binder_node(m, node);
3442 for (n = rb_first(&proc->refs_by_desc);
3443 n != NULL;
3444 n = rb_next(n))
3445 print_binder_ref(m, rb_entry(n, struct binder_ref,
3448 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
3449 print_binder_buffer(m, " buffer",
3450 rb_entry(n, struct binder_buffer, rb_node));
3451 list_for_each_entry(w, &proc->todo, entry)
3452 print_binder_work(m, " ", " pending transaction", w);
3453 list_for_each_entry(w, &proc->delivered_death, entry) {
3454 seq_puts(m, " has delivered dead binder\n");
3457 if (!print_all && m->count == header_pos)
3458 m->count = start_pos;
3461 static const char * const binder_return_strings[] = {
3466 "BR_ACQUIRE_RESULT",
3468 "BR_TRANSACTION_COMPLETE",
3473 "BR_ATTEMPT_ACQUIRE",
3478 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
3482 static const char * const binder_command_strings[] = {
3485 "BC_ACQUIRE_RESULT",
3493 "BC_ATTEMPT_ACQUIRE",
3494 "BC_REGISTER_LOOPER",
3497 "BC_REQUEST_DEATH_NOTIFICATION",
3498 "BC_CLEAR_DEATH_NOTIFICATION",
3499 "BC_DEAD_BINDER_DONE"
3502 static const char * const binder_objstat_strings[] = {
3509 "transaction_complete"
3512 static void print_binder_stats(struct seq_file *m, const char *prefix,
3513 struct binder_stats *stats)
3517 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
3518 ARRAY_SIZE(binder_command_strings));
3519 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
3521 seq_printf(m, "%s%s: %d\n", prefix,
3522 binder_command_strings[i], stats->bc[i]);
3525 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
3526 ARRAY_SIZE(binder_return_strings));
3527 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
3529 seq_printf(m, "%s%s: %d\n", prefix,
3530 binder_return_strings[i], stats->br[i]);
3533 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
3534 ARRAY_SIZE(binder_objstat_strings));
3535 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
3536 ARRAY_SIZE(stats->obj_deleted));
3537 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
3538 if (stats->obj_created[i] || stats->obj_deleted[i])
3539 seq_printf(m, "%s%s: active %d total %d\n", prefix,
3540 binder_objstat_strings[i],
3541 stats->obj_created[i] - stats->obj_deleted[i],
3542 stats->obj_created[i]);
3546 static void print_binder_proc_stats(struct seq_file *m,
3547 struct binder_proc *proc)
3549 struct binder_work *w;
3550 struct rb_node *n;
3551 int count, strong, weak;
3553 seq_printf(m, "proc %d\n", proc->pid);
3554 count = 0;
3555 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3556 count++;
3557 seq_printf(m, " threads: %d\n", count);
3558 seq_printf(m, " requested threads: %d+%d/%d\n"
3559 " ready threads %d\n"
3560 " free async space %zd\n", proc->requested_threads,
3561 proc->requested_threads_started, proc->max_threads,
3562 proc->ready_threads, proc->free_async_space);
3563 count = 0;
3564 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
3565 count++;
3566 seq_printf(m, " nodes: %d\n", count);
3570 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
3571 struct binder_ref *ref = rb_entry(n, struct binder_ref,
3574 strong += ref->strong;
3577 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
3579 count = 0;
3580 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
3581 count++;
3582 seq_printf(m, " buffers: %d\n", count);
3585 list_for_each_entry(w, &proc->todo, entry) {
3587 case BINDER_WORK_TRANSACTION:
3594 seq_printf(m, " pending transactions: %d\n", count);
3596 print_binder_stats(m, " ", &proc->stats);
3600 static int binder_state_show(struct seq_file *m, void *unused)
3602 struct binder_proc *proc;
3603 struct binder_node *node;
3604 int do_lock = !binder_debug_no_lock;
3606 if (do_lock)
3607 binder_lock(__func__);
3609 seq_puts(m, "binder state:\n");
3611 if (!hlist_empty(&binder_dead_nodes))
3612 seq_puts(m, "dead nodes:\n");
3613 hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
3614 print_binder_node(m, node);
3616 hlist_for_each_entry(proc, &binder_procs, proc_node)
3617 print_binder_proc(m, proc, 1);
3618 if (do_lock)
3619 binder_unlock(__func__);
3620 return 0;
3621 }
3623 static int binder_stats_show(struct seq_file *m, void *unused)
3625 struct binder_proc *proc;
3626 int do_lock = !binder_debug_no_lock;
3628 if (do_lock)
3629 binder_lock(__func__);
3631 seq_puts(m, "binder stats:\n");
3633 print_binder_stats(m, "", &binder_stats);
3635 hlist_for_each_entry(proc, &binder_procs, proc_node)
3636 print_binder_proc_stats(m, proc);
3637 if (do_lock)
3638 binder_unlock(__func__);
3639 return 0;
3640 }
3642 static int binder_transactions_show(struct seq_file *m, void *unused)
3644 struct binder_proc *proc;
3645 int do_lock = !binder_debug_no_lock;
3647 if (do_lock)
3648 binder_lock(__func__);
3650 seq_puts(m, "binder transactions:\n");
3651 hlist_for_each_entry(proc, &binder_procs, proc_node)
3652 print_binder_proc(m, proc, 0);
3653 if (do_lock)
3654 binder_unlock(__func__);
3655 return 0;
3656 }
3658 static int binder_proc_show(struct seq_file *m, void *unused)
3660 struct binder_proc *proc = m->private;
3661 int do_lock = !binder_debug_no_lock;
3663 if (do_lock)
3664 binder_lock(__func__);
3665 seq_puts(m, "binder proc state:\n");
3666 print_binder_proc(m, proc, 1);
3667 if (do_lock)
3668 binder_unlock(__func__);
3669 return 0;
3670 }
3672 static void print_binder_transaction_log_entry(struct seq_file *m,
3673 struct binder_transaction_log_entry *e)
3676 "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
3677 e->debug_id, (e->call_type == 2) ? "reply" :
3678 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
3679 e->from_thread, e->to_proc, e->to_thread, e->to_node,
3680 e->target_handle, e->data_size, e->offsets_size);
3683 static int binder_transaction_log_show(struct seq_file *m, void *unused)
3685 struct binder_transaction_log *log = m->private;
3686 int i;
3688 if (log->full) {
3689 for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
3690 print_binder_transaction_log_entry(m, &log->entry[i]);
3691 }
3692 for (i = 0; i < log->next; i++)
3693 print_binder_transaction_log_entry(m, &log->entry[i]);
3694 return 0;
3695 }
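/*
 * The transaction log is a fixed-size ring indexed by log->next, so
 * printing from next to the end and then from 0 to next-1 yields the
 * entries in oldest-to-newest order once the ring has wrapped.
 */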
3697 static const struct file_operations binder_fops = {
3698 .owner = THIS_MODULE,
3699 .poll = binder_poll,
3700 .unlocked_ioctl = binder_ioctl,
3701 .compat_ioctl = binder_ioctl,
3702 .mmap = binder_mmap,
3703 .open = binder_open,
3704 .flush = binder_flush,
3705 .release = binder_release,
3708 static struct miscdevice binder_miscdev = {
3709 .minor = MISC_DYNAMIC_MINOR,
3711 .fops = &binder_fops
3714 BINDER_DEBUG_ENTRY(state);
3715 BINDER_DEBUG_ENTRY(stats);
3716 BINDER_DEBUG_ENTRY(transactions);
3717 BINDER_DEBUG_ENTRY(transaction_log);
3719 static int __init binder_init(void)
3723 binder_deferred_workqueue = create_singlethread_workqueue("binder");
3724 if (!binder_deferred_workqueue)
3725 return -ENOMEM;
3727 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
3728 if (binder_debugfs_dir_entry_root)
3729 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
3730 binder_debugfs_dir_entry_root);
3731 ret = misc_register(&binder_miscdev);
3732 if (binder_debugfs_dir_entry_root) {
3733 debugfs_create_file("state",
3735 binder_debugfs_dir_entry_root,
3737 &binder_state_fops);
3738 debugfs_create_file("stats",
3740 binder_debugfs_dir_entry_root,
3742 &binder_stats_fops);
3743 debugfs_create_file("transactions",
3745 binder_debugfs_dir_entry_root,
3747 &binder_transactions_fops);
3748 debugfs_create_file("transaction_log",
3750 binder_debugfs_dir_entry_root,
3751 &binder_transaction_log,
3752 &binder_transaction_log_fops);
3753 debugfs_create_file("failed_transaction_log",
3755 binder_debugfs_dir_entry_root,
3756 &binder_transaction_log_failed,
3757 &binder_transaction_log_fops);
3762 device_initcall(binder_init);
3764 #define CREATE_TRACE_POINTS
3765 #include "binder_trace.h"
3767 MODULE_LICENSE("GPL v2");