/*
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <asm/cacheflush.h>
21 #include <linux/fdtable.h>
22 #include <linux/file.h>
23 #include <linux/freezer.h>
25 #include <linux/list.h>
26 #include <linux/miscdevice.h>
28 #include <linux/module.h>
29 #include <linux/mutex.h>
30 #include <linux/nsproxy.h>
31 #include <linux/poll.h>
32 #include <linux/debugfs.h>
33 #include <linux/rbtree.h>
34 #include <linux/sched.h>
35 #include <linux/seq_file.h>
36 #include <linux/uaccess.h>
37 #include <linux/vmalloc.h>
38 #include <linux/slab.h>
39 #include <linux/pid_namespace.h>
40 #include <linux/security.h>
#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif
46 #include <uapi/linux/android/binder.h>
47 #include "binder_trace.h"
49 static DEFINE_MUTEX(binder_main_lock);
50 static DEFINE_MUTEX(binder_deferred_lock);
51 static DEFINE_MUTEX(binder_mmap_lock);
53 static HLIST_HEAD(binder_procs);
54 static HLIST_HEAD(binder_deferred_list);
55 static HLIST_HEAD(binder_dead_nodes);
57 static struct dentry *binder_debugfs_dir_entry_root;
58 static struct dentry *binder_debugfs_dir_entry_proc;
59 static struct binder_node *binder_context_mgr_node;
60 static kuid_t binder_context_mgr_uid = INVALID_UID;
61 static int binder_last_id;
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}
77 static int binder_proc_show(struct seq_file *m, void *unused);
78 BINDER_DEBUG_ENTRY(proc);
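/*
 * BINDER_DEBUG_ENTRY(proc) expands to binder_proc_open() plus a matching
 * binder_proc_fops, wiring binder_proc_show() into the seq_file
 * single_open() helpers; presumably the same pattern backs the other
 * binder debugfs files created under binder_debugfs_dir_entry_root.
 */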
/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_4M
#define SZ_4M 0x400000
#endif
89 #define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
91 #define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
94 BINDER_DEBUG_USER_ERROR = 1U << 0,
95 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
96 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
97 BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
98 BINDER_DEBUG_DEAD_BINDER = 1U << 4,
99 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
100 BINDER_DEBUG_READ_WRITE = 1U << 6,
101 BINDER_DEBUG_USER_REFS = 1U << 7,
102 BINDER_DEBUG_THREADS = 1U << 8,
103 BINDER_DEBUG_TRANSACTION = 1U << 9,
104 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
105 BINDER_DEBUG_FREE_BUFFER = 1U << 11,
106 BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
107 BINDER_DEBUG_BUFFER_ALLOC = 1U << 13,
108 BINDER_DEBUG_PRIORITY_CAP = 1U << 14,
109 BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15,
111 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
112 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
113 module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
115 static bool binder_debug_no_lock;
116 module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
118 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
119 static int binder_stop_on_user_error;
static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
131 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
132 param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
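/*
 * debug_mask, proc_no_lock and stop_on_user_error are ordinary module
 * parameters; with S_IWUSR | S_IRUGO they can also be changed at runtime,
 * e.g. (path assumed for a module named "binder"):
 *   echo 0x3ff > /sys/module/binder/parameters/debug_mask
 */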
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)
148 enum binder_stat_types {
154 BINDER_STAT_TRANSACTION,
155 BINDER_STAT_TRANSACTION_COMPLETE,
159 struct binder_stats {
160 int br[_IOC_NR(BR_FAILED_REPLY) + 1];
161 int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
162 int obj_created[BINDER_STAT_COUNT];
163 int obj_deleted[BINDER_STAT_COUNT];
166 static struct binder_stats binder_stats;
168 static inline void binder_stats_deleted(enum binder_stat_types type)
170 binder_stats.obj_deleted[type]++;
173 static inline void binder_stats_created(enum binder_stat_types type)
175 binder_stats.obj_created[type]++;
178 struct binder_transaction_log_entry {
190 struct binder_transaction_log {
193 struct binder_transaction_log_entry entry[32];
195 static struct binder_transaction_log binder_transaction_log;
196 static struct binder_transaction_log binder_transaction_log_failed;
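/*
 * The transaction logs are small ring buffers: entry[] keeps the last 32
 * records and binder_transaction_log_add() wraps next back to 0 once it
 * reaches ARRAY_SIZE(log->entry), overwriting the oldest entry.
 */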
198 static struct binder_transaction_log_entry *binder_transaction_log_add(
199 struct binder_transaction_log *log)
201 struct binder_transaction_log_entry *e;
203 e = &log->entry[log->next];
204 memset(e, 0, sizeof(*e));
206 if (log->next == ARRAY_SIZE(log->entry)) {
214 struct list_head entry;
216 BINDER_WORK_TRANSACTION = 1,
217 BINDER_WORK_TRANSACTION_COMPLETE,
219 BINDER_WORK_DEAD_BINDER,
220 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
221 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
227 struct binder_work work;
229 struct rb_node rb_node;
230 struct hlist_node dead_node;
232 struct binder_proc *proc;
233 struct hlist_head refs;
234 int internal_strong_refs;
236 int local_strong_refs;
237 binder_uintptr_t ptr;
238 binder_uintptr_t cookie;
239 unsigned has_strong_ref:1;
240 unsigned pending_strong_ref:1;
241 unsigned has_weak_ref:1;
242 unsigned pending_weak_ref:1;
243 unsigned has_async_transaction:1;
244 unsigned accept_fds:1;
245 unsigned min_priority:8;
246 struct list_head async_todo;
249 struct binder_ref_death {
250 struct binder_work work;
251 binder_uintptr_t cookie;
255 /* Lookups needed: */
256 /* node + proc => ref (transaction) */
257 /* desc + proc => ref (transaction, inc/dec ref) */
258 /* node => refs + procs (proc exit) */
260 struct rb_node rb_node_desc;
261 struct rb_node rb_node_node;
262 struct hlist_node node_entry;
263 struct binder_proc *proc;
264 struct binder_node *node;
268 struct binder_ref_death *death;
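/*
 * Each binder_ref is therefore indexed twice in its owning proc:
 * rb_node_desc keys it by the userspace handle (desc) and rb_node_node by
 * the kernel binder_node, while node_entry chains all refs to one node so
 * they can be torn down when that node's process exits.
 */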
271 struct binder_buffer {
272 struct list_head entry; /* free and allocated entries by address */
273 struct rb_node rb_node; /* free entry by size or allocated entry */
276 unsigned allow_user_free:1;
277 unsigned async_transaction:1;
278 unsigned debug_id:29;
280 struct binder_transaction *transaction;
282 struct binder_node *target_node;
288 enum binder_deferred_state {
289 BINDER_DEFERRED_PUT_FILES = 0x01,
290 BINDER_DEFERRED_FLUSH = 0x02,
291 BINDER_DEFERRED_RELEASE = 0x04,
295 struct hlist_node proc_node;
296 struct rb_root threads;
297 struct rb_root nodes;
298 struct rb_root refs_by_desc;
299 struct rb_root refs_by_node;
301 struct vm_area_struct *vma;
302 struct mm_struct *vma_vm_mm;
303 struct task_struct *tsk;
304 struct files_struct *files;
305 struct mutex files_lock;
306 const struct cred *cred;
307 struct hlist_node deferred_work_node;
310 ptrdiff_t user_buffer_offset;
312 struct list_head buffers;
313 struct rb_root free_buffers;
314 struct rb_root allocated_buffers;
315 size_t free_async_space;
319 uint32_t buffer_free;
320 struct list_head todo;
321 wait_queue_head_t wait;
322 struct binder_stats stats;
323 struct list_head delivered_death;
325 int requested_threads;
326 int requested_threads_started;
328 long default_priority;
329 struct dentry *debugfs_entry;
333 BINDER_LOOPER_STATE_REGISTERED = 0x01,
334 BINDER_LOOPER_STATE_ENTERED = 0x02,
335 BINDER_LOOPER_STATE_EXITED = 0x04,
336 BINDER_LOOPER_STATE_INVALID = 0x08,
337 BINDER_LOOPER_STATE_WAITING = 0x10,
338 BINDER_LOOPER_STATE_NEED_RETURN = 0x20,
339 BINDER_LOOPER_STATE_POLL = 0x40,
342 struct binder_thread {
343 struct binder_proc *proc;
344 struct rb_node rb_node;
347 struct binder_transaction *transaction_stack;
348 struct list_head todo;
349 uint32_t return_error; /* Write failed, return error code in read buf */
350 uint32_t return_error2; /* Write failed, return error code in read */
351 /* buffer. Used when sending a reply to a dead process that */
352 /* we are also waiting on */
353 wait_queue_head_t wait;
354 struct binder_stats stats;
357 struct binder_transaction {
359 struct binder_work work;
360 struct binder_thread *from;
361 struct binder_transaction *from_parent;
362 struct binder_proc *to_proc;
363 struct binder_thread *to_thread;
364 struct binder_transaction *to_parent;
365 unsigned need_reply:1;
366 /* unsigned is_dead:1; */ /* not used at the moment */
368 struct binder_buffer *buffer;
377 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
379 static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
381 unsigned long rlim_cur;
385 mutex_lock(&proc->files_lock);
386 if (proc->files == NULL) {
390 if (!lock_task_sighand(proc->tsk, &irqs)) {
394 rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
395 unlock_task_sighand(proc->tsk, &irqs);
397 ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
399 mutex_unlock(&proc->files_lock);
/*
 * copied from fd_install
 */
406 static void task_fd_install(
407 struct binder_proc *proc, unsigned int fd, struct file *file)
409 mutex_lock(&proc->files_lock);
411 __fd_install(proc->files, fd, file);
412 mutex_unlock(&proc->files_lock);
/*
 * copied from sys_close
 */
418 static long task_close_fd(struct binder_proc *proc, unsigned int fd)
422 mutex_lock(&proc->files_lock);
423 if (proc->files == NULL) {
427 retval = __close_fd(proc->files, fd);
428 /* can't restart close syscall because file table entry was cleared */
429 if (unlikely(retval == -ERESTARTSYS ||
430 retval == -ERESTARTNOINTR ||
431 retval == -ERESTARTNOHAND ||
432 retval == -ERESTART_RESTARTBLOCK))
435 mutex_unlock(&proc->files_lock);
439 static inline void binder_lock(const char *tag)
441 trace_binder_lock(tag);
442 mutex_lock(&binder_main_lock);
443 trace_binder_locked(tag);
446 static inline void binder_unlock(const char *tag)
448 trace_binder_unlock(tag);
449 mutex_unlock(&binder_main_lock);
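/*
 * Note: this version of the driver is serialized by the single global
 * binder_main_lock; binder_lock()/binder_unlock() just wrap it with
 * tracepoints, and the lock is released around blocking operations such as
 * the waits in binder_thread_read().
 */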
452 static void binder_set_nice(long nice)
456 if (can_nice(current, nice)) {
457 set_user_nice(current, nice);
460 min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
461 binder_debug(BINDER_DEBUG_PRIORITY_CAP,
462 "%d: nice value %ld not allowed use %ld instead\n",
463 current->pid, nice, min_nice);
464 set_user_nice(current, min_nice);
465 if (min_nice <= MAX_NICE)
467 binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
470 static size_t binder_buffer_size(struct binder_proc *proc,
471 struct binder_buffer *buffer)
473 if (list_is_last(&buffer->entry, &proc->buffers))
474 return proc->buffer + proc->buffer_size - (void *)buffer->data;
475 return (size_t)list_entry(buffer->entry.next,
476 struct binder_buffer, entry) - (size_t)buffer->data;
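/*
 * A buffer's usable size is implicit: it is the gap between this buffer's
 * data[] and the next binder_buffer header on proc->buffers (or the end of
 * the mapping for the last buffer).  E.g. with headers at offsets 0 and
 * 256, the first buffer's size is 256 - offsetof(struct binder_buffer, data).
 */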
479 static void binder_insert_free_buffer(struct binder_proc *proc,
480 struct binder_buffer *new_buffer)
482 struct rb_node **p = &proc->free_buffers.rb_node;
483 struct rb_node *parent = NULL;
484 struct binder_buffer *buffer;
486 size_t new_buffer_size;
488 BUG_ON(!new_buffer->free);
490 new_buffer_size = binder_buffer_size(proc, new_buffer);
492 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
493 "%d: add free buffer, size %zd, at %pK\n",
494 proc->pid, new_buffer_size, new_buffer);
498 buffer = rb_entry(parent, struct binder_buffer, rb_node);
499 BUG_ON(!buffer->free);
501 buffer_size = binder_buffer_size(proc, buffer);
503 if (new_buffer_size < buffer_size)
504 p = &parent->rb_left;
506 p = &parent->rb_right;
508 rb_link_node(&new_buffer->rb_node, parent, p);
509 rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
512 static void binder_insert_allocated_buffer(struct binder_proc *proc,
513 struct binder_buffer *new_buffer)
515 struct rb_node **p = &proc->allocated_buffers.rb_node;
516 struct rb_node *parent = NULL;
517 struct binder_buffer *buffer;
519 BUG_ON(new_buffer->free);
523 buffer = rb_entry(parent, struct binder_buffer, rb_node);
524 BUG_ON(buffer->free);
526 if (new_buffer < buffer)
527 p = &parent->rb_left;
528 else if (new_buffer > buffer)
529 p = &parent->rb_right;
533 rb_link_node(&new_buffer->rb_node, parent, p);
534 rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
537 static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
540 struct rb_node *n = proc->allocated_buffers.rb_node;
541 struct binder_buffer *buffer;
542 struct binder_buffer *kern_ptr;
544 kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
545 - offsetof(struct binder_buffer, data));
548 buffer = rb_entry(n, struct binder_buffer, rb_node);
549 BUG_ON(buffer->free);
551 if (kern_ptr < buffer)
553 else if (kern_ptr > buffer)
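/*
 * The mmap'ed transaction space is mapped at both a kernel and a userspace
 * address; proc->user_buffer_offset is the constant delta between the two,
 * so the user pointer handed back in BC_FREE_BUFFER is converted to its
 * kernel-side binder_buffer as above (user_ptr - offset -
 * offsetof(struct binder_buffer, data)) and then looked up in
 * allocated_buffers.
 */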
561 static int binder_update_page_range(struct binder_proc *proc, int allocate,
562 void *start, void *end,
563 struct vm_area_struct *vma)
566 unsigned long user_page_addr;
568 struct mm_struct *mm;
570 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
571 "%d: %s pages %pK-%pK\n", proc->pid,
572 allocate ? "allocate" : "free", start, end);
577 trace_binder_update_page_range(proc, allocate, start, end);
582 mm = get_task_mm(proc->tsk);
585 down_write(&mm->mmap_sem);
586 if (!mmget_still_valid(mm)) {
593 if (vma && mm != proc->vma_vm_mm) {
594 pr_err("%d: vma mm and task mm mismatch\n",
604 pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
609 for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
612 page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
615 *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
617 pr_err("%d: binder_alloc_buf failed for page at %pK\n",
618 proc->pid, page_addr);
619 goto err_alloc_page_failed;
621 ret = map_kernel_range_noflush((unsigned long)page_addr,
622 PAGE_SIZE, PAGE_KERNEL, page);
623 flush_cache_vmap((unsigned long)page_addr,
624 (unsigned long)page_addr + PAGE_SIZE);
626 pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
627 proc->pid, page_addr);
628 goto err_map_kernel_failed;
631 (uintptr_t)page_addr + proc->user_buffer_offset;
632 ret = vm_insert_page(vma, user_page_addr, page[0]);
634 pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
635 proc->pid, user_page_addr);
636 goto err_vm_insert_page_failed;
638 /* vm_insert_page does not seem to increment the refcount */
641 up_write(&mm->mmap_sem);
647 for (page_addr = end - PAGE_SIZE; page_addr >= start;
648 page_addr -= PAGE_SIZE) {
649 page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
651 zap_page_range(vma, (uintptr_t)page_addr +
652 proc->user_buffer_offset, PAGE_SIZE, NULL);
653 err_vm_insert_page_failed:
654 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
655 err_map_kernel_failed:
658 err_alloc_page_failed:
663 up_write(&mm->mmap_sem);
669 static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
671 size_t offsets_size, int is_async)
673 struct rb_node *n = proc->free_buffers.rb_node;
674 struct binder_buffer *buffer;
676 struct rb_node *best_fit = NULL;
681 if (proc->vma == NULL) {
682 pr_err("%d: binder_alloc_buf, no vma\n",
687 size = ALIGN(data_size, sizeof(void *)) +
688 ALIGN(offsets_size, sizeof(void *));
690 if (size < data_size || size < offsets_size) {
691 binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
692 proc->pid, data_size, offsets_size);
697 proc->free_async_space < size + sizeof(struct binder_buffer)) {
698 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
699 "%d: binder_alloc_buf size %zd failed, no async space left\n",
705 buffer = rb_entry(n, struct binder_buffer, rb_node);
706 BUG_ON(!buffer->free);
707 buffer_size = binder_buffer_size(proc, buffer);
709 if (size < buffer_size) {
712 } else if (size > buffer_size)
719 if (best_fit == NULL) {
720 pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
725 buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
726 buffer_size = binder_buffer_size(proc, buffer);
729 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
730 "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
731 proc->pid, size, buffer, buffer_size);
734 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
736 if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
737 buffer_size = size; /* no room for other buffers */
739 buffer_size = size + sizeof(struct binder_buffer);
742 (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
743 if (end_page_addr > has_page_addr)
744 end_page_addr = has_page_addr;
745 if (binder_update_page_range(proc, 1,
746 (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
749 rb_erase(best_fit, &proc->free_buffers);
751 binder_insert_allocated_buffer(proc, buffer);
752 if (buffer_size != size) {
753 struct binder_buffer *new_buffer = (void *)buffer->data + size;
755 list_add(&new_buffer->entry, &buffer->entry);
756 new_buffer->free = 1;
757 binder_insert_free_buffer(proc, new_buffer);
759 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
760 "%d: binder_alloc_buf size %zd got %pK\n",
761 proc->pid, size, buffer);
762 buffer->data_size = data_size;
763 buffer->offsets_size = offsets_size;
764 buffer->async_transaction = is_async;
766 proc->free_async_space -= size + sizeof(struct binder_buffer);
767 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
768 "%d: binder_alloc_buf size %zd async free %zd\n",
769 proc->pid, size, proc->free_async_space);
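/*
 * Allocation is best fit over proc->free_buffers (an rbtree ordered by
 * size): the smallest free chunk that fits is taken, its backing pages are
 * mapped on demand via binder_update_page_range(), and any remainder is
 * split off and reinserted as a new free buffer.
 */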
775 static void *buffer_start_page(struct binder_buffer *buffer)
777 return (void *)((uintptr_t)buffer & PAGE_MASK);
780 static void *buffer_end_page(struct binder_buffer *buffer)
782 return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
785 static void binder_delete_free_buffer(struct binder_proc *proc,
786 struct binder_buffer *buffer)
788 struct binder_buffer *prev, *next = NULL;
789 int free_page_end = 1;
790 int free_page_start = 1;
792 BUG_ON(proc->buffers.next == &buffer->entry);
793 prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
795 if (buffer_end_page(prev) == buffer_start_page(buffer)) {
797 if (buffer_end_page(prev) == buffer_end_page(buffer))
799 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
800 "%d: merge free, buffer %pK share page with %pK\n",
801 proc->pid, buffer, prev);
804 if (!list_is_last(&buffer->entry, &proc->buffers)) {
805 next = list_entry(buffer->entry.next,
806 struct binder_buffer, entry);
807 if (buffer_start_page(next) == buffer_end_page(buffer)) {
809 if (buffer_start_page(next) ==
810 buffer_start_page(buffer))
812 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
813 "%d: merge free, buffer %pK share page with %pK\n",
814 proc->pid, buffer, prev);
817 list_del(&buffer->entry);
818 if (free_page_start || free_page_end) {
819 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
820 "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
821 proc->pid, buffer, free_page_start ? "" : " end",
822 free_page_end ? "" : " start", prev, next);
823 binder_update_page_range(proc, 0, free_page_start ?
824 buffer_start_page(buffer) : buffer_end_page(buffer),
825 (free_page_end ? buffer_end_page(buffer) :
826 buffer_start_page(buffer)) + PAGE_SIZE, NULL);
830 static void binder_free_buf(struct binder_proc *proc,
831 struct binder_buffer *buffer)
833 size_t size, buffer_size;
835 buffer_size = binder_buffer_size(proc, buffer);
837 size = ALIGN(buffer->data_size, sizeof(void *)) +
838 ALIGN(buffer->offsets_size, sizeof(void *));
840 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
841 "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
842 proc->pid, buffer, size, buffer_size);
844 BUG_ON(buffer->free);
845 BUG_ON(size > buffer_size);
846 BUG_ON(buffer->transaction != NULL);
847 BUG_ON((void *)buffer < proc->buffer);
848 BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
850 if (buffer->async_transaction) {
851 proc->free_async_space += size + sizeof(struct binder_buffer);
853 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
854 "%d: binder_free_buf size %zd async free %zd\n",
855 proc->pid, size, proc->free_async_space);
858 binder_update_page_range(proc, 0,
859 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
860 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
862 rb_erase(&buffer->rb_node, &proc->allocated_buffers);
864 if (!list_is_last(&buffer->entry, &proc->buffers)) {
865 struct binder_buffer *next = list_entry(buffer->entry.next,
866 struct binder_buffer, entry);
869 rb_erase(&next->rb_node, &proc->free_buffers);
870 binder_delete_free_buffer(proc, next);
873 if (proc->buffers.next != &buffer->entry) {
874 struct binder_buffer *prev = list_entry(buffer->entry.prev,
875 struct binder_buffer, entry);
878 binder_delete_free_buffer(proc, buffer);
879 rb_erase(&prev->rb_node, &proc->free_buffers);
883 binder_insert_free_buffer(proc, buffer);
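/*
 * Freeing is the inverse: the buffer's pages are released, the buffer moves
 * from allocated_buffers back to free_buffers, and it is coalesced with a
 * free previous/next neighbour via binder_delete_free_buffer() so adjacent
 * free space stays in a single entry.
 */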
886 static struct binder_node *binder_get_node(struct binder_proc *proc,
887 binder_uintptr_t ptr)
889 struct rb_node *n = proc->nodes.rb_node;
890 struct binder_node *node;
893 node = rb_entry(n, struct binder_node, rb_node);
897 else if (ptr > node->ptr)
905 static struct binder_node *binder_new_node(struct binder_proc *proc,
906 binder_uintptr_t ptr,
907 binder_uintptr_t cookie)
909 struct rb_node **p = &proc->nodes.rb_node;
910 struct rb_node *parent = NULL;
911 struct binder_node *node;
915 node = rb_entry(parent, struct binder_node, rb_node);
919 else if (ptr > node->ptr)
925 node = kzalloc(sizeof(*node), GFP_KERNEL);
928 binder_stats_created(BINDER_STAT_NODE);
929 rb_link_node(&node->rb_node, parent, p);
930 rb_insert_color(&node->rb_node, &proc->nodes);
931 node->debug_id = ++binder_last_id;
934 node->cookie = cookie;
935 node->work.type = BINDER_WORK_NODE;
936 INIT_LIST_HEAD(&node->work.entry);
937 INIT_LIST_HEAD(&node->async_todo);
938 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
939 "%d:%d node %d u%016llx c%016llx created\n",
940 proc->pid, current->pid, node->debug_id,
941 (u64)node->ptr, (u64)node->cookie);
945 static int binder_inc_node(struct binder_node *node, int strong, int internal,
946 struct list_head *target_list)
950 if (target_list == NULL &&
951 node->internal_strong_refs == 0 &&
952 !(node == binder_context_mgr_node &&
953 node->has_strong_ref)) {
954 pr_err("invalid inc strong node for %d\n",
958 node->internal_strong_refs++;
960 node->local_strong_refs++;
961 if (!node->has_strong_ref && target_list) {
962 list_del_init(&node->work.entry);
963 list_add_tail(&node->work.entry, target_list);
967 node->local_weak_refs++;
968 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
969 if (target_list == NULL) {
970 pr_err("invalid inc weak node for %d\n",
974 list_add_tail(&node->work.entry, target_list);
980 static int binder_dec_node(struct binder_node *node, int strong, int internal)
984 node->internal_strong_refs--;
986 node->local_strong_refs--;
987 if (node->local_strong_refs || node->internal_strong_refs)
991 node->local_weak_refs--;
992 if (node->local_weak_refs || !hlist_empty(&node->refs))
995 if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
996 if (list_empty(&node->work.entry)) {
997 list_add_tail(&node->work.entry, &node->proc->todo);
998 wake_up_interruptible(&node->proc->wait);
1001 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1002 !node->local_weak_refs) {
1003 list_del_init(&node->work.entry);
1005 rb_erase(&node->rb_node, &node->proc->nodes);
1006 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1007 "refless node %d deleted\n",
1010 hlist_del(&node->dead_node);
1011 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1012 "dead node %d deleted\n",
1016 binder_stats_deleted(BINDER_STAT_NODE);
1024 static struct binder_ref *binder_get_ref(struct binder_proc *proc,
1025 u32 desc, bool need_strong_ref)
1027 struct rb_node *n = proc->refs_by_desc.rb_node;
1028 struct binder_ref *ref;
1031 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1033 if (desc < ref->desc) {
1035 } else if (desc > ref->desc) {
1037 } else if (need_strong_ref && !ref->strong) {
1038 binder_user_error("tried to use weak ref as strong ref\n");
1047 static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
1048 struct binder_node *node)
1051 struct rb_node **p = &proc->refs_by_node.rb_node;
1052 struct rb_node *parent = NULL;
1053 struct binder_ref *ref, *new_ref;
1057 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1059 if (node < ref->node)
1061 else if (node > ref->node)
1062 p = &(*p)->rb_right;
1066 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1067 if (new_ref == NULL)
1069 binder_stats_created(BINDER_STAT_REF);
1070 new_ref->debug_id = ++binder_last_id;
1071 new_ref->proc = proc;
1072 new_ref->node = node;
1073 rb_link_node(&new_ref->rb_node_node, parent, p);
1074 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1076 new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
1077 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1078 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1079 if (ref->desc > new_ref->desc)
1081 new_ref->desc = ref->desc + 1;
1084 p = &proc->refs_by_desc.rb_node;
1087 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1089 if (new_ref->desc < ref->desc)
1091 else if (new_ref->desc > ref->desc)
1092 p = &(*p)->rb_right;
1096 rb_link_node(&new_ref->rb_node_desc, parent, p);
1097 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1099 hlist_add_head(&new_ref->node_entry, &node->refs);
1101 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1102 "%d new ref %d desc %d for node %d\n",
1103 proc->pid, new_ref->debug_id, new_ref->desc,
1106 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1107 "%d new ref %d desc %d for dead node\n",
1108 proc->pid, new_ref->debug_id, new_ref->desc);
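/*
 * Handle values are allocated per target process here: desc 0 is reserved
 * for a ref to the context manager node and every other ref gets the
 * lowest unused descriptor; this is the value userspace later passes back
 * in flat_binder_object.handle and in BC_ACQUIRE/BC_RELEASE and friends.
 */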
1113 static void binder_delete_ref(struct binder_ref *ref)
1115 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1116 "%d delete ref %d desc %d for node %d\n",
1117 ref->proc->pid, ref->debug_id, ref->desc,
1118 ref->node->debug_id);
1120 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1121 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1123 binder_dec_node(ref->node, 1, 1);
1124 hlist_del(&ref->node_entry);
1125 binder_dec_node(ref->node, 0, 1);
1127 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1128 "%d delete ref %d desc %d has death notification\n",
1129 ref->proc->pid, ref->debug_id, ref->desc);
1130 list_del(&ref->death->work.entry);
1132 binder_stats_deleted(BINDER_STAT_DEATH);
1135 binder_stats_deleted(BINDER_STAT_REF);
1138 static int binder_inc_ref(struct binder_ref *ref, int strong,
1139 struct list_head *target_list)
1144 if (ref->strong == 0) {
1145 ret = binder_inc_node(ref->node, 1, 1, target_list);
1151 if (ref->weak == 0) {
1152 ret = binder_inc_node(ref->node, 0, 1, target_list);
1162 static int binder_dec_ref(struct binder_ref *ref, int strong)
1165 if (ref->strong == 0) {
1166 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1167 ref->proc->pid, ref->debug_id,
1168 ref->desc, ref->strong, ref->weak);
1172 if (ref->strong == 0) {
1175 ret = binder_dec_node(ref->node, strong, 1);
1180 if (ref->weak == 0) {
1181 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1182 ref->proc->pid, ref->debug_id,
1183 ref->desc, ref->strong, ref->weak);
1188 if (ref->strong == 0 && ref->weak == 0)
1189 binder_delete_ref(ref);
1193 static void binder_pop_transaction(struct binder_thread *target_thread,
1194 struct binder_transaction *t)
1196 if (target_thread) {
1197 BUG_ON(target_thread->transaction_stack != t);
1198 BUG_ON(target_thread->transaction_stack->from != target_thread);
1199 target_thread->transaction_stack =
1200 target_thread->transaction_stack->from_parent;
1205 t->buffer->transaction = NULL;
1207 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1210 static void binder_send_failed_reply(struct binder_transaction *t,
1211 uint32_t error_code)
1213 struct binder_thread *target_thread;
1214 struct binder_transaction *next;
1216 BUG_ON(t->flags & TF_ONE_WAY);
1218 target_thread = t->from;
1219 if (target_thread) {
1220 if (target_thread->return_error != BR_OK &&
1221 target_thread->return_error2 == BR_OK) {
1222 target_thread->return_error2 =
1223 target_thread->return_error;
1224 target_thread->return_error = BR_OK;
1226 if (target_thread->return_error == BR_OK) {
1227 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1228 "send failed reply for transaction %d to %d:%d\n",
1230 target_thread->proc->pid,
1231 target_thread->pid);
1233 binder_pop_transaction(target_thread, t);
1234 target_thread->return_error = error_code;
1235 wake_up_interruptible(&target_thread->wait);
1237 pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
1238 target_thread->proc->pid,
1240 target_thread->return_error);
1244 next = t->from_parent;
1246 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1247 "send failed reply for transaction %d, target dead\n",
1250 binder_pop_transaction(target_thread, t);
1252 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1253 "reply failed, no target thread at root\n");
1257 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1258 "reply failed, no target thread -- retry %d\n",
1263 static void binder_transaction_buffer_release(struct binder_proc *proc,
1264 struct binder_buffer *buffer,
1265 binder_size_t *failed_at)
1267 binder_size_t *offp, *off_end;
1268 int debug_id = buffer->debug_id;
1270 binder_debug(BINDER_DEBUG_TRANSACTION,
1271 "%d buffer release %d, size %zd-%zd, failed at %pK\n",
1272 proc->pid, buffer->debug_id,
1273 buffer->data_size, buffer->offsets_size, failed_at);
1275 if (buffer->target_node)
1276 binder_dec_node(buffer->target_node, 1, 0);
1278 offp = (binder_size_t *)(buffer->data +
1279 ALIGN(buffer->data_size, sizeof(void *)));
1281 off_end = failed_at;
1283 off_end = (void *)offp + buffer->offsets_size;
1284 for (; offp < off_end; offp++) {
1285 struct flat_binder_object *fp;
1287 if (*offp > buffer->data_size - sizeof(*fp) ||
1288 buffer->data_size < sizeof(*fp) ||
1289 !IS_ALIGNED(*offp, sizeof(u32))) {
1290 pr_err("transaction release %d bad offset %lld, size %zd\n",
1291 debug_id, (u64)*offp, buffer->data_size);
1294 fp = (struct flat_binder_object *)(buffer->data + *offp);
1296 case BINDER_TYPE_BINDER:
1297 case BINDER_TYPE_WEAK_BINDER: {
1298 struct binder_node *node = binder_get_node(proc, fp->binder);
1301 pr_err("transaction release %d bad node %016llx\n",
1302 debug_id, (u64)fp->binder);
1305 binder_debug(BINDER_DEBUG_TRANSACTION,
1306 " node %d u%016llx\n",
1307 node->debug_id, (u64)node->ptr);
1308 binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
1310 case BINDER_TYPE_HANDLE:
1311 case BINDER_TYPE_WEAK_HANDLE: {
1312 struct binder_ref *ref;
1314 ref = binder_get_ref(proc, fp->handle,
1315 fp->type == BINDER_TYPE_HANDLE);
1318 pr_err("transaction release %d bad handle %d\n",
1319 debug_id, fp->handle);
1322 binder_debug(BINDER_DEBUG_TRANSACTION,
1323 " ref %d desc %d (node %d)\n",
1324 ref->debug_id, ref->desc, ref->node->debug_id);
1325 binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
1328 case BINDER_TYPE_FD:
1329 binder_debug(BINDER_DEBUG_TRANSACTION,
1330 " fd %d\n", fp->handle);
1332 task_close_fd(proc, fp->handle);
1336 pr_err("transaction release %d bad object type %x\n",
1337 debug_id, fp->type);
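/*
 * binder_transaction_buffer_release() walks a buffer's offsets array (up to
 * failed_at when a transaction aborted mid-copy) and undoes whatever each
 * flat_binder_object took: node references for BINDER_TYPE_(WEAK_)BINDER,
 * ref counts for handles, and the installed fd for BINDER_TYPE_FD.
 */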
1343 static void binder_transaction(struct binder_proc *proc,
1344 struct binder_thread *thread,
1345 struct binder_transaction_data *tr, int reply)
1347 struct binder_transaction *t;
1348 struct binder_work *tcomplete;
1349 binder_size_t *offp, *off_end;
1350 binder_size_t off_min;
1351 struct binder_proc *target_proc;
1352 struct binder_thread *target_thread = NULL;
1353 struct binder_node *target_node = NULL;
1354 struct list_head *target_list;
1355 wait_queue_head_t *target_wait;
1356 struct binder_transaction *in_reply_to = NULL;
1357 struct binder_transaction_log_entry *e;
1358 uint32_t return_error;
1360 e = binder_transaction_log_add(&binder_transaction_log);
1361 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
1362 e->from_proc = proc->pid;
1363 e->from_thread = thread->pid;
1364 e->target_handle = tr->target.handle;
1365 e->data_size = tr->data_size;
1366 e->offsets_size = tr->offsets_size;
1369 in_reply_to = thread->transaction_stack;
1370 if (in_reply_to == NULL) {
1371 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
1372 proc->pid, thread->pid);
1373 return_error = BR_FAILED_REPLY;
1374 goto err_empty_call_stack;
1376 binder_set_nice(in_reply_to->saved_priority);
1377 if (in_reply_to->to_thread != thread) {
1378 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
1379 proc->pid, thread->pid, in_reply_to->debug_id,
1380 in_reply_to->to_proc ?
1381 in_reply_to->to_proc->pid : 0,
1382 in_reply_to->to_thread ?
1383 in_reply_to->to_thread->pid : 0);
1384 return_error = BR_FAILED_REPLY;
1386 goto err_bad_call_stack;
1388 thread->transaction_stack = in_reply_to->to_parent;
1389 target_thread = in_reply_to->from;
1390 if (target_thread == NULL) {
1391 return_error = BR_DEAD_REPLY;
1392 goto err_dead_binder;
1394 if (target_thread->transaction_stack != in_reply_to) {
1395 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
1396 proc->pid, thread->pid,
1397 target_thread->transaction_stack ?
1398 target_thread->transaction_stack->debug_id : 0,
1399 in_reply_to->debug_id);
1400 return_error = BR_FAILED_REPLY;
1402 target_thread = NULL;
1403 goto err_dead_binder;
1405 target_proc = target_thread->proc;
1407 if (tr->target.handle) {
1408 struct binder_ref *ref;
1410 ref = binder_get_ref(proc, tr->target.handle, true);
1412 binder_user_error("%d:%d got transaction to invalid handle\n",
1413 proc->pid, thread->pid);
1414 return_error = BR_FAILED_REPLY;
1415 goto err_invalid_target_handle;
1417 target_node = ref->node;
1419 target_node = binder_context_mgr_node;
1420 if (target_node == NULL) {
1421 return_error = BR_DEAD_REPLY;
1422 goto err_no_context_mgr_node;
1425 e->to_node = target_node->debug_id;
1426 target_proc = target_node->proc;
1427 if (target_proc == NULL) {
1428 return_error = BR_DEAD_REPLY;
1429 goto err_dead_binder;
1431 if (WARN_ON(proc == target_proc)) {
1432 return_error = BR_FAILED_REPLY;
1433 goto err_invalid_target_handle;
1435 if (security_binder_transaction(proc->cred,
1436 target_proc->cred) < 0) {
1437 return_error = BR_FAILED_REPLY;
1438 goto err_invalid_target_handle;
1440 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
1441 struct binder_transaction *tmp;
1443 tmp = thread->transaction_stack;
1444 if (tmp->to_thread != thread) {
1445 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
1446 proc->pid, thread->pid, tmp->debug_id,
1447 tmp->to_proc ? tmp->to_proc->pid : 0,
1449 tmp->to_thread->pid : 0);
1450 return_error = BR_FAILED_REPLY;
1451 goto err_bad_call_stack;
1454 if (tmp->from && tmp->from->proc == target_proc)
1455 target_thread = tmp->from;
1456 tmp = tmp->from_parent;
1460 if (target_thread) {
1461 e->to_thread = target_thread->pid;
1462 target_list = &target_thread->todo;
1463 target_wait = &target_thread->wait;
1465 target_list = &target_proc->todo;
1466 target_wait = &target_proc->wait;
1468 e->to_proc = target_proc->pid;
1470 /* TODO: reuse incoming transaction for reply */
1471 t = kzalloc(sizeof(*t), GFP_KERNEL);
1473 return_error = BR_FAILED_REPLY;
1474 goto err_alloc_t_failed;
1476 binder_stats_created(BINDER_STAT_TRANSACTION);
1478 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
1479 if (tcomplete == NULL) {
1480 return_error = BR_FAILED_REPLY;
1481 goto err_alloc_tcomplete_failed;
1483 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
1485 t->debug_id = ++binder_last_id;
1486 e->debug_id = t->debug_id;
1489 binder_debug(BINDER_DEBUG_TRANSACTION,
1490 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
1491 proc->pid, thread->pid, t->debug_id,
1492 target_proc->pid, target_thread->pid,
1493 (u64)tr->data.ptr.buffer,
1494 (u64)tr->data.ptr.offsets,
1495 (u64)tr->data_size, (u64)tr->offsets_size);
1497 binder_debug(BINDER_DEBUG_TRANSACTION,
1498 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
1499 proc->pid, thread->pid, t->debug_id,
1500 target_proc->pid, target_node->debug_id,
1501 (u64)tr->data.ptr.buffer,
1502 (u64)tr->data.ptr.offsets,
1503 (u64)tr->data_size, (u64)tr->offsets_size);
1505 if (!reply && !(tr->flags & TF_ONE_WAY))
1509 t->sender_euid = task_euid(proc->tsk);
1510 t->to_proc = target_proc;
1511 t->to_thread = target_thread;
1513 t->flags = tr->flags;
1514 t->priority = task_nice(current);
1516 trace_binder_transaction(reply, t, target_node);
1518 t->buffer = binder_alloc_buf(target_proc, tr->data_size,
1519 tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
1520 if (t->buffer == NULL) {
1521 return_error = BR_FAILED_REPLY;
1522 goto err_binder_alloc_buf_failed;
1524 t->buffer->allow_user_free = 0;
1525 t->buffer->debug_id = t->debug_id;
1526 t->buffer->transaction = t;
1527 t->buffer->target_node = target_node;
1528 trace_binder_transaction_alloc_buf(t->buffer);
1530 binder_inc_node(target_node, 1, 0, NULL);
1532 offp = (binder_size_t *)(t->buffer->data +
1533 ALIGN(tr->data_size, sizeof(void *)));
1535 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
1536 tr->data.ptr.buffer, tr->data_size)) {
1537 binder_user_error("%d:%d got transaction with invalid data ptr\n",
1538 proc->pid, thread->pid);
1539 return_error = BR_FAILED_REPLY;
1540 goto err_copy_data_failed;
1542 if (copy_from_user(offp, (const void __user *)(uintptr_t)
1543 tr->data.ptr.offsets, tr->offsets_size)) {
1544 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
1545 proc->pid, thread->pid);
1546 return_error = BR_FAILED_REPLY;
1547 goto err_copy_data_failed;
1549 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
1550 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
1551 proc->pid, thread->pid, (u64)tr->offsets_size);
1552 return_error = BR_FAILED_REPLY;
1553 goto err_bad_offset;
1555 off_end = (void *)offp + tr->offsets_size;
1557 for (; offp < off_end; offp++) {
1558 struct flat_binder_object *fp;
1560 if (*offp > t->buffer->data_size - sizeof(*fp) ||
1562 t->buffer->data_size < sizeof(*fp) ||
1563 !IS_ALIGNED(*offp, sizeof(u32))) {
1564 binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
1565 proc->pid, thread->pid, (u64)*offp,
1567 (u64)(t->buffer->data_size -
1569 return_error = BR_FAILED_REPLY;
1570 goto err_bad_offset;
1572 fp = (struct flat_binder_object *)(t->buffer->data + *offp);
1573 off_min = *offp + sizeof(struct flat_binder_object);
1575 case BINDER_TYPE_BINDER:
1576 case BINDER_TYPE_WEAK_BINDER: {
1577 struct binder_ref *ref;
1578 struct binder_node *node = binder_get_node(proc, fp->binder);
1581 node = binder_new_node(proc, fp->binder, fp->cookie);
1583 return_error = BR_FAILED_REPLY;
1584 goto err_binder_new_node_failed;
1586 node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1587 node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1589 if (fp->cookie != node->cookie) {
1590 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
1591 proc->pid, thread->pid,
1592 (u64)fp->binder, node->debug_id,
1593 (u64)fp->cookie, (u64)node->cookie);
1594 return_error = BR_FAILED_REPLY;
1595 goto err_binder_get_ref_for_node_failed;
1597 if (security_binder_transfer_binder(proc->cred,
1598 target_proc->cred)) {
1599 return_error = BR_FAILED_REPLY;
1600 goto err_binder_get_ref_for_node_failed;
1602 ref = binder_get_ref_for_node(target_proc, node);
1604 return_error = BR_FAILED_REPLY;
1605 goto err_binder_get_ref_for_node_failed;
1607 if (fp->type == BINDER_TYPE_BINDER)
1608 fp->type = BINDER_TYPE_HANDLE;
1610 fp->type = BINDER_TYPE_WEAK_HANDLE;
1612 fp->handle = ref->desc;
1614 binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
1617 trace_binder_transaction_node_to_ref(t, node, ref);
1618 binder_debug(BINDER_DEBUG_TRANSACTION,
1619 " node %d u%016llx -> ref %d desc %d\n",
1620 node->debug_id, (u64)node->ptr,
1621 ref->debug_id, ref->desc);
1623 case BINDER_TYPE_HANDLE:
1624 case BINDER_TYPE_WEAK_HANDLE: {
1625 struct binder_ref *ref;
1627 ref = binder_get_ref(proc, fp->handle,
1628 fp->type == BINDER_TYPE_HANDLE);
1631 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
1633 thread->pid, fp->handle);
1634 return_error = BR_FAILED_REPLY;
1635 goto err_binder_get_ref_failed;
1637 if (security_binder_transfer_binder(proc->cred,
1638 target_proc->cred)) {
1639 return_error = BR_FAILED_REPLY;
1640 goto err_binder_get_ref_failed;
1642 if (ref->node->proc == target_proc) {
1643 if (fp->type == BINDER_TYPE_HANDLE)
1644 fp->type = BINDER_TYPE_BINDER;
1646 fp->type = BINDER_TYPE_WEAK_BINDER;
1647 fp->binder = ref->node->ptr;
1648 fp->cookie = ref->node->cookie;
1649 binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
1650 trace_binder_transaction_ref_to_node(t, ref);
1651 binder_debug(BINDER_DEBUG_TRANSACTION,
1652 " ref %d desc %d -> node %d u%016llx\n",
1653 ref->debug_id, ref->desc, ref->node->debug_id,
1654 (u64)ref->node->ptr);
1656 struct binder_ref *new_ref;
1658 new_ref = binder_get_ref_for_node(target_proc, ref->node);
1659 if (new_ref == NULL) {
1660 return_error = BR_FAILED_REPLY;
1661 goto err_binder_get_ref_for_node_failed;
1664 fp->handle = new_ref->desc;
1666 binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
1667 trace_binder_transaction_ref_to_ref(t, ref,
1669 binder_debug(BINDER_DEBUG_TRANSACTION,
1670 " ref %d desc %d -> ref %d desc %d (node %d)\n",
1671 ref->debug_id, ref->desc, new_ref->debug_id,
1672 new_ref->desc, ref->node->debug_id);
1676 case BINDER_TYPE_FD: {
1681 if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
1682 binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
1683 proc->pid, thread->pid, fp->handle);
1684 return_error = BR_FAILED_REPLY;
1685 goto err_fd_not_allowed;
1687 } else if (!target_node->accept_fds) {
1688 binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
1689 proc->pid, thread->pid, fp->handle);
1690 return_error = BR_FAILED_REPLY;
1691 goto err_fd_not_allowed;
1694 file = fget(fp->handle);
1696 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
1697 proc->pid, thread->pid, fp->handle);
1698 return_error = BR_FAILED_REPLY;
1699 goto err_fget_failed;
1701 if (security_binder_transfer_file(proc->cred,
1705 return_error = BR_FAILED_REPLY;
1706 goto err_get_unused_fd_failed;
1708 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
1709 if (target_fd < 0) {
1711 return_error = BR_FAILED_REPLY;
1712 goto err_get_unused_fd_failed;
1714 task_fd_install(target_proc, target_fd, file);
1715 trace_binder_transaction_fd(t, fp->handle, target_fd);
1716 binder_debug(BINDER_DEBUG_TRANSACTION,
1717 " fd %d -> %d\n", fp->handle, target_fd);
1720 fp->handle = target_fd;
1724 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
1725 proc->pid, thread->pid, fp->type);
1726 return_error = BR_FAILED_REPLY;
1727 goto err_bad_object_type;
1731 BUG_ON(t->buffer->async_transaction != 0);
1732 binder_pop_transaction(target_thread, in_reply_to);
1733 } else if (!(t->flags & TF_ONE_WAY)) {
1734 BUG_ON(t->buffer->async_transaction != 0);
1736 t->from_parent = thread->transaction_stack;
1737 thread->transaction_stack = t;
1739 BUG_ON(target_node == NULL);
1740 BUG_ON(t->buffer->async_transaction != 1);
1741 if (target_node->has_async_transaction) {
1742 target_list = &target_node->async_todo;
1745 target_node->has_async_transaction = 1;
1747 t->work.type = BINDER_WORK_TRANSACTION;
1748 list_add_tail(&t->work.entry, target_list);
1749 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
1750 list_add_tail(&tcomplete->entry, &thread->todo);
1752 if (reply || !(t->flags & TF_ONE_WAY))
1753 wake_up_interruptible_sync(target_wait);
1755 wake_up_interruptible(target_wait);
1759 err_get_unused_fd_failed:
1762 err_binder_get_ref_for_node_failed:
1763 err_binder_get_ref_failed:
1764 err_binder_new_node_failed:
1765 err_bad_object_type:
1767 err_copy_data_failed:
1768 trace_binder_transaction_failed_buffer_release(t->buffer);
1769 binder_transaction_buffer_release(target_proc, t->buffer, offp);
1770 t->buffer->transaction = NULL;
1771 binder_free_buf(target_proc, t->buffer);
1772 err_binder_alloc_buf_failed:
1774 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
1775 err_alloc_tcomplete_failed:
1777 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1780 err_empty_call_stack:
1782 err_invalid_target_handle:
1783 err_no_context_mgr_node:
1784 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1785 "%d:%d transaction failed %d, size %lld-%lld\n",
1786 proc->pid, thread->pid, return_error,
1787 (u64)tr->data_size, (u64)tr->offsets_size);
1790 struct binder_transaction_log_entry *fe;
1792 fe = binder_transaction_log_add(&binder_transaction_log_failed);
1796 BUG_ON(thread->return_error != BR_OK);
1798 thread->return_error = BR_TRANSACTION_COMPLETE;
1799 binder_send_failed_reply(in_reply_to, return_error);
1801 thread->return_error = return_error;
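/*
 * Rough lifecycle of a call as handled above: the client writes
 * BC_TRANSACTION, binder_transaction() copies the payload into a buffer
 * owned by the target proc, translates every object in the offsets array
 * (nodes to handles, handles to nodes or new handles, fds to new fds),
 * queues BINDER_WORK_TRANSACTION on the target and a TRANSACTION_COMPLETE
 * on the caller, and wakes the target; a BC_REPLY later travels back the
 * same way to the sending thread recorded in t->from.
 */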
1804 static int binder_thread_write(struct binder_proc *proc,
1805 struct binder_thread *thread,
1806 binder_uintptr_t binder_buffer, size_t size,
1807 binder_size_t *consumed)
1810 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
1811 void __user *ptr = buffer + *consumed;
1812 void __user *end = buffer + size;
1814 while (ptr < end && thread->return_error == BR_OK) {
1815 if (get_user(cmd, (uint32_t __user *)ptr))
1817 ptr += sizeof(uint32_t);
1818 trace_binder_command(cmd);
1819 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
1820 binder_stats.bc[_IOC_NR(cmd)]++;
1821 proc->stats.bc[_IOC_NR(cmd)]++;
1822 thread->stats.bc[_IOC_NR(cmd)]++;
1830 struct binder_ref *ref;
1831 const char *debug_string;
1833 if (get_user(target, (uint32_t __user *)ptr))
1835 ptr += sizeof(uint32_t);
1836 if (target == 0 && binder_context_mgr_node &&
1837 (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
1838 if (binder_context_mgr_node->proc == proc) {
1839 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
1840 proc->pid, thread->pid);
1843 ref = binder_get_ref_for_node(proc,
1844 binder_context_mgr_node);
1845 if (ref->desc != target) {
1846 binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
1847 proc->pid, thread->pid,
1851 ref = binder_get_ref(proc, target,
1852 cmd == BC_ACQUIRE ||
1855 binder_user_error("%d:%d refcount change on invalid ref %d\n",
1856 proc->pid, thread->pid, target);
1861 debug_string = "IncRefs";
1862 binder_inc_ref(ref, 0, NULL);
1865 debug_string = "Acquire";
1866 binder_inc_ref(ref, 1, NULL);
1869 debug_string = "Release";
1870 binder_dec_ref(ref, 1);
1874 debug_string = "DecRefs";
1875 binder_dec_ref(ref, 0);
1878 binder_debug(BINDER_DEBUG_USER_REFS,
1879 "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
1880 proc->pid, thread->pid, debug_string, ref->debug_id,
1881 ref->desc, ref->strong, ref->weak, ref->node->debug_id);
1884 case BC_INCREFS_DONE:
1885 case BC_ACQUIRE_DONE: {
1886 binder_uintptr_t node_ptr;
1887 binder_uintptr_t cookie;
1888 struct binder_node *node;
1890 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
1892 ptr += sizeof(binder_uintptr_t);
1893 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
1895 ptr += sizeof(binder_uintptr_t);
1896 node = binder_get_node(proc, node_ptr);
1898 binder_user_error("%d:%d %s u%016llx no match\n",
1899 proc->pid, thread->pid,
1900 cmd == BC_INCREFS_DONE ?
1906 if (cookie != node->cookie) {
1907 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
1908 proc->pid, thread->pid,
1909 cmd == BC_INCREFS_DONE ?
1910 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
1911 (u64)node_ptr, node->debug_id,
1912 (u64)cookie, (u64)node->cookie);
1915 if (cmd == BC_ACQUIRE_DONE) {
1916 if (node->pending_strong_ref == 0) {
1917 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
1918 proc->pid, thread->pid,
1922 node->pending_strong_ref = 0;
1924 if (node->pending_weak_ref == 0) {
1925 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
1926 proc->pid, thread->pid,
1930 node->pending_weak_ref = 0;
1932 binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
1933 binder_debug(BINDER_DEBUG_USER_REFS,
1934 "%d:%d %s node %d ls %d lw %d\n",
1935 proc->pid, thread->pid,
1936 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
1937 node->debug_id, node->local_strong_refs, node->local_weak_refs);
1940 case BC_ATTEMPT_ACQUIRE:
1941 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
1943 case BC_ACQUIRE_RESULT:
1944 pr_err("BC_ACQUIRE_RESULT not supported\n");
1947 case BC_FREE_BUFFER: {
1948 binder_uintptr_t data_ptr;
1949 struct binder_buffer *buffer;
1951 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
1953 ptr += sizeof(binder_uintptr_t);
1955 buffer = binder_buffer_lookup(proc, data_ptr);
1956 if (buffer == NULL) {
1957 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
1958 proc->pid, thread->pid, (u64)data_ptr);
1961 if (!buffer->allow_user_free) {
1962 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
1963 proc->pid, thread->pid, (u64)data_ptr);
1966 binder_debug(BINDER_DEBUG_FREE_BUFFER,
1967 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
1968 proc->pid, thread->pid, (u64)data_ptr,
1970 buffer->transaction ? "active" : "finished");
1972 if (buffer->transaction) {
1973 buffer->transaction->buffer = NULL;
1974 buffer->transaction = NULL;
1976 if (buffer->async_transaction && buffer->target_node) {
1977 BUG_ON(!buffer->target_node->has_async_transaction);
1978 if (list_empty(&buffer->target_node->async_todo))
1979 buffer->target_node->has_async_transaction = 0;
1981 list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
1983 trace_binder_transaction_buffer_release(buffer);
1984 binder_transaction_buffer_release(proc, buffer, NULL);
1985 binder_free_buf(proc, buffer);
1989 case BC_TRANSACTION:
1991 struct binder_transaction_data tr;
1993 if (copy_from_user(&tr, ptr, sizeof(tr)))
1996 binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
2000 case BC_REGISTER_LOOPER:
2001 binder_debug(BINDER_DEBUG_THREADS,
2002 "%d:%d BC_REGISTER_LOOPER\n",
2003 proc->pid, thread->pid);
2004 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
2005 thread->looper |= BINDER_LOOPER_STATE_INVALID;
2006 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
2007 proc->pid, thread->pid);
2008 } else if (proc->requested_threads == 0) {
2009 thread->looper |= BINDER_LOOPER_STATE_INVALID;
2010 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
2011 proc->pid, thread->pid);
2013 proc->requested_threads--;
2014 proc->requested_threads_started++;
2016 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
2018 case BC_ENTER_LOOPER:
2019 binder_debug(BINDER_DEBUG_THREADS,
2020 "%d:%d BC_ENTER_LOOPER\n",
2021 proc->pid, thread->pid);
2022 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
2023 thread->looper |= BINDER_LOOPER_STATE_INVALID;
2024 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
2025 proc->pid, thread->pid);
2027 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
2029 case BC_EXIT_LOOPER:
2030 binder_debug(BINDER_DEBUG_THREADS,
2031 "%d:%d BC_EXIT_LOOPER\n",
2032 proc->pid, thread->pid);
2033 thread->looper |= BINDER_LOOPER_STATE_EXITED;
2036 case BC_REQUEST_DEATH_NOTIFICATION:
2037 case BC_CLEAR_DEATH_NOTIFICATION: {
2039 binder_uintptr_t cookie;
2040 struct binder_ref *ref;
2041 struct binder_ref_death *death;
2043 if (get_user(target, (uint32_t __user *)ptr))
2045 ptr += sizeof(uint32_t);
2046 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2048 ptr += sizeof(binder_uintptr_t);
2049 ref = binder_get_ref(proc, target, false);
2051 binder_user_error("%d:%d %s invalid ref %d\n",
2052 proc->pid, thread->pid,
2053 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2054 "BC_REQUEST_DEATH_NOTIFICATION" :
2055 "BC_CLEAR_DEATH_NOTIFICATION",
2060 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2061 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
2062 proc->pid, thread->pid,
2063 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2064 "BC_REQUEST_DEATH_NOTIFICATION" :
2065 "BC_CLEAR_DEATH_NOTIFICATION",
2066 (u64)cookie, ref->debug_id, ref->desc,
2067 ref->strong, ref->weak, ref->node->debug_id);
2069 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
2071 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
2072 proc->pid, thread->pid);
2075 death = kzalloc(sizeof(*death), GFP_KERNEL);
2076 if (death == NULL) {
2077 thread->return_error = BR_ERROR;
2078 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2079 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
2080 proc->pid, thread->pid);
2083 binder_stats_created(BINDER_STAT_DEATH);
2084 INIT_LIST_HEAD(&death->work.entry);
2085 death->cookie = cookie;
2087 if (ref->node->proc == NULL) {
2088 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
2089 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2090 list_add_tail(&ref->death->work.entry, &thread->todo);
2092 list_add_tail(&ref->death->work.entry, &proc->todo);
2093 wake_up_interruptible(&proc->wait);
2097 if (ref->death == NULL) {
2098 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
2099 proc->pid, thread->pid);
2103 if (death->cookie != cookie) {
2104 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
2105 proc->pid, thread->pid,
2111 if (list_empty(&death->work.entry)) {
2112 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2113 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2114 list_add_tail(&death->work.entry, &thread->todo);
2116 list_add_tail(&death->work.entry, &proc->todo);
2117 wake_up_interruptible(&proc->wait);
2120 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
2121 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
2125 case BC_DEAD_BINDER_DONE: {
2126 struct binder_work *w;
2127 binder_uintptr_t cookie;
2128 struct binder_ref_death *death = NULL;
2130 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2133 ptr += sizeof(cookie);
2134 list_for_each_entry(w, &proc->delivered_death, entry) {
2135 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
2137 if (tmp_death->cookie == cookie) {
2142 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2143 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
2144 proc->pid, thread->pid, (u64)cookie,
2146 if (death == NULL) {
2147 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
2148 proc->pid, thread->pid, (u64)cookie);
2152 list_del_init(&death->work.entry);
2153 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
2154 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2155 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2156 list_add_tail(&death->work.entry, &thread->todo);
2158 list_add_tail(&death->work.entry, &proc->todo);
2159 wake_up_interruptible(&proc->wait);
2165 pr_err("%d:%d unknown command %d\n",
2166 proc->pid, thread->pid, cmd);
2169 *consumed = ptr - buffer;
2174 static void binder_stat_br(struct binder_proc *proc,
2175 struct binder_thread *thread, uint32_t cmd)
2177 trace_binder_return(cmd);
2178 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
2179 binder_stats.br[_IOC_NR(cmd)]++;
2180 proc->stats.br[_IOC_NR(cmd)]++;
2181 thread->stats.br[_IOC_NR(cmd)]++;
2185 static int binder_has_proc_work(struct binder_proc *proc,
2186 struct binder_thread *thread)
2188 return !list_empty(&proc->todo) ||
2189 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2192 static int binder_has_thread_work(struct binder_thread *thread)
2194 return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
2195 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
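/*
 * These two predicates are the wait conditions used in binder_thread_read():
 * a thread with no transaction stack of its own sleeps on proc->wait until
 * the process todo list has work, while a thread in the middle of a call
 * (or with a pending return_error) waits on its private thread->todo.
 */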
2198 static int binder_thread_read(struct binder_proc *proc,
2199 struct binder_thread *thread,
2200 binder_uintptr_t binder_buffer, size_t size,
2201 binder_size_t *consumed, int non_block)
2203 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
2204 void __user *ptr = buffer + *consumed;
2205 void __user *end = buffer + size;
2208 int wait_for_proc_work;
2210 if (*consumed == 0) {
2211 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
2213 ptr += sizeof(uint32_t);
2217 wait_for_proc_work = thread->transaction_stack == NULL &&
2218 list_empty(&thread->todo);
2220 if (thread->return_error != BR_OK && ptr < end) {
2221 if (thread->return_error2 != BR_OK) {
2222 if (put_user(thread->return_error2, (uint32_t __user *)ptr))
2223 return -EFAULT;
2224 ptr += sizeof(uint32_t);
2225 binder_stat_br(proc, thread, thread->return_error2);
2226 if (ptr == end)
2227 goto done;
2228 thread->return_error2 = BR_OK;
2229 }
2230 if (put_user(thread->return_error, (uint32_t __user *)ptr))
2231 return -EFAULT;
2232 ptr += sizeof(uint32_t);
2233 binder_stat_br(proc, thread, thread->return_error);
2234 thread->return_error = BR_OK;
2235 goto done;
2236 }
2239 thread->looper |= BINDER_LOOPER_STATE_WAITING;
2240 if (wait_for_proc_work)
2241 proc->ready_threads++;
2243 binder_unlock(__func__);
2245 trace_binder_wait_for_work(wait_for_proc_work,
2246 !!thread->transaction_stack,
2247 !list_empty(&thread->todo));
2248 if (wait_for_proc_work) {
2249 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2250 BINDER_LOOPER_STATE_ENTERED))) {
2251 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
2252 proc->pid, thread->pid, thread->looper);
2253 wait_event_interruptible(binder_user_error_wait,
2254 binder_stop_on_user_error < 2);
2255 }
2256 binder_set_nice(proc->default_priority);
2257 if (non_block) {
2258 if (!binder_has_proc_work(proc, thread))
2259 ret = -EAGAIN;
2260 } else
2261 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
2262 } else {
2263 if (non_block) {
2264 if (!binder_has_thread_work(thread))
2265 ret = -EAGAIN;
2266 } else
2267 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
2268 }
2270 binder_lock(__func__);
2272 if (wait_for_proc_work)
2273 proc->ready_threads--;
2274 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
2281 struct binder_transaction_data tr;
2282 struct binder_work *w;
2283 struct binder_transaction *t = NULL;
2285 if (!list_empty(&thread->todo)) {
2286 w = list_first_entry(&thread->todo, struct binder_work,
2287 entry);
2288 } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
2289 w = list_first_entry(&proc->todo, struct binder_work,
2290 entry);
2293 if (ptr - buffer == 4 &&
2294 !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
2299 if (end - ptr < sizeof(tr) + 4)
2303 case BINDER_WORK_TRANSACTION: {
2304 t = container_of(w, struct binder_transaction, work);
2306 case BINDER_WORK_TRANSACTION_COMPLETE: {
2307 cmd = BR_TRANSACTION_COMPLETE;
2308 if (put_user(cmd, (uint32_t __user *)ptr))
2309 return -EFAULT;
2310 ptr += sizeof(uint32_t);
2312 binder_stat_br(proc, thread, cmd);
2313 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
2314 "%d:%d BR_TRANSACTION_COMPLETE\n",
2315 proc->pid, thread->pid);
2317 list_del(&w->entry);
2319 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2321 case BINDER_WORK_NODE: {
2322 struct binder_node *node = container_of(w, struct binder_node, work);
2323 uint32_t cmd = BR_NOOP;
2324 const char *cmd_name;
2325 int strong = node->internal_strong_refs || node->local_strong_refs;
2326 int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
2328 if (weak && !node->has_weak_ref) {
2330 cmd_name = "BR_INCREFS";
2331 node->has_weak_ref = 1;
2332 node->pending_weak_ref = 1;
2333 node->local_weak_refs++;
2334 } else if (strong && !node->has_strong_ref) {
2336 cmd_name = "BR_ACQUIRE";
2337 node->has_strong_ref = 1;
2338 node->pending_strong_ref = 1;
2339 node->local_strong_refs++;
2340 } else if (!strong && node->has_strong_ref) {
2342 cmd_name = "BR_RELEASE";
2343 node->has_strong_ref = 0;
2344 } else if (!weak && node->has_weak_ref) {
2346 cmd_name = "BR_DECREFS";
2347 node->has_weak_ref = 0;
2349 if (cmd != BR_NOOP) {
2350 if (put_user(cmd, (uint32_t __user *)ptr))
2351 return -EFAULT;
2352 ptr += sizeof(uint32_t);
2353 if (put_user(node->ptr,
2354 (binder_uintptr_t __user *)ptr))
2355 return -EFAULT;
2356 ptr += sizeof(binder_uintptr_t);
2357 if (put_user(node->cookie,
2358 (binder_uintptr_t __user *)ptr))
2359 return -EFAULT;
2360 ptr += sizeof(binder_uintptr_t);
2362 binder_stat_br(proc, thread, cmd);
2363 binder_debug(BINDER_DEBUG_USER_REFS,
2364 "%d:%d %s %d u%016llx c%016llx\n",
2365 proc->pid, thread->pid, cmd_name,
2367 (u64)node->ptr, (u64)node->cookie);
2369 list_del_init(&w->entry);
2370 if (!weak && !strong) {
2371 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2372 "%d:%d node %d u%016llx c%016llx deleted\n",
2373 proc->pid, thread->pid,
2377 rb_erase(&node->rb_node, &proc->nodes);
2379 binder_stats_deleted(BINDER_STAT_NODE);
2381 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2382 "%d:%d node %d u%016llx c%016llx state unchanged\n",
2383 proc->pid, thread->pid,
2390 case BINDER_WORK_DEAD_BINDER:
2391 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2392 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2393 struct binder_ref_death *death;
2396 death = container_of(w, struct binder_ref_death, work);
2397 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
2398 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
2400 cmd = BR_DEAD_BINDER;
2401 if (put_user(cmd, (uint32_t __user *)ptr))
2402 return -EFAULT;
2403 ptr += sizeof(uint32_t);
2404 if (put_user(death->cookie,
2405 (binder_uintptr_t __user *)ptr))
2406 return -EFAULT;
2407 ptr += sizeof(binder_uintptr_t);
2408 binder_stat_br(proc, thread, cmd);
2409 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2410 "%d:%d %s %016llx\n",
2411 proc->pid, thread->pid,
2412 cmd == BR_DEAD_BINDER ?
2413 "BR_DEAD_BINDER" :
2414 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
2415 (u64)death->cookie);
2417 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
2418 list_del(&w->entry);
2420 binder_stats_deleted(BINDER_STAT_DEATH);
2422 list_move(&w->entry, &proc->delivered_death);
2423 if (cmd == BR_DEAD_BINDER)
2424 goto done; /* DEAD_BINDER notifications can cause transactions */
2431 BUG_ON(t->buffer == NULL);
2432 if (t->buffer->target_node) {
2433 struct binder_node *target_node = t->buffer->target_node;
2435 tr.target.ptr = target_node->ptr;
2436 tr.cookie = target_node->cookie;
2437 t->saved_priority = task_nice(current);
2438 if (t->priority < target_node->min_priority &&
2439 !(t->flags & TF_ONE_WAY))
2440 binder_set_nice(t->priority);
2441 else if (!(t->flags & TF_ONE_WAY) ||
2442 t->saved_priority > target_node->min_priority)
2443 binder_set_nice(target_node->min_priority);
2444 cmd = BR_TRANSACTION;
2451 tr.flags = t->flags;
2452 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
2455 struct task_struct *sender = t->from->proc->tsk;
2457 tr.sender_pid = task_tgid_nr_ns(sender,
2458 task_active_pid_ns(current));
2463 tr.data_size = t->buffer->data_size;
2464 tr.offsets_size = t->buffer->offsets_size;
2465 tr.data.ptr.buffer = (binder_uintptr_t)(
2466 (uintptr_t)t->buffer->data +
2467 proc->user_buffer_offset);
2468 tr.data.ptr.offsets = tr.data.ptr.buffer +
2469 ALIGN(t->buffer->data_size,
2470 sizeof(void *));
2472 if (put_user(cmd, (uint32_t __user *)ptr))
2473 return -EFAULT;
2474 ptr += sizeof(uint32_t);
2475 if (copy_to_user(ptr, &tr, sizeof(tr)))
2476 return -EFAULT;
2477 ptr += sizeof(tr);
2479 trace_binder_transaction_received(t);
2480 binder_stat_br(proc, thread, cmd);
2481 binder_debug(BINDER_DEBUG_TRANSACTION,
2482 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
2483 proc->pid, thread->pid,
2484 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
2485 "BR_REPLY",
2486 t->debug_id, t->from ? t->from->proc->pid : 0,
2487 t->from ? t->from->pid : 0, cmd,
2488 t->buffer->data_size, t->buffer->offsets_size,
2489 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
2491 list_del(&t->work.entry);
2492 t->buffer->allow_user_free = 1;
2493 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
2494 t->to_parent = thread->transaction_stack;
2495 t->to_thread = thread;
2496 thread->transaction_stack = t;
2498 t->buffer->transaction = NULL;
2500 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2507 *consumed = ptr - buffer;
2508 if (proc->requested_threads + proc->ready_threads == 0 &&
2509 proc->requested_threads_started < proc->max_threads &&
2510 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2511 BINDER_LOOPER_STATE_ENTERED))
2512 /* the user-space code fails to spawn a new thread if we leave this out */) {
2513 proc->requested_threads++;
2514 binder_debug(BINDER_DEBUG_THREADS,
2515 "%d:%d BR_SPAWN_LOOPER\n",
2516 proc->pid, thread->pid);
2517 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
2518 return -EFAULT;
2519 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
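/*
 * binder_release_work() - discard any work items still queued on 'list'
 * when a thread or process goes away.  Undelivered transactions that
 * expect a reply are failed with BR_DEAD_REPLY; other item types are
 * simply freed and the corresponding object statistics updated.
 */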
2524 static void binder_release_work(struct list_head *list)
2526 struct binder_work *w;
2528 while (!list_empty(list)) {
2529 w = list_first_entry(list, struct binder_work, entry);
2530 list_del_init(&w->entry);
2532 case BINDER_WORK_TRANSACTION: {
2533 struct binder_transaction *t;
2535 t = container_of(w, struct binder_transaction, work);
2536 if (t->buffer->target_node &&
2537 !(t->flags & TF_ONE_WAY)) {
2538 binder_send_failed_reply(t, BR_DEAD_REPLY);
2540 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2541 "undelivered transaction %d\n",
2543 t->buffer->transaction = NULL;
2545 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2548 case BINDER_WORK_TRANSACTION_COMPLETE: {
2549 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2550 "undelivered TRANSACTION_COMPLETE\n");
2552 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2554 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2555 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2556 struct binder_ref_death *death;
2558 death = container_of(w, struct binder_ref_death, work);
2559 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2560 "undelivered death notification, %016llx\n",
2561 (u64)death->cookie);
2563 binder_stats_deleted(BINDER_STAT_DEATH);
2566 pr_err("unexpected work type, %d, not freed\n",
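/*
 * binder_get_thread() - look up the binder_thread for the current task in
 * proc->threads (an rbtree keyed by pid), creating and inserting a new one
 * on first use.  New threads start with BINDER_LOOPER_STATE_NEED_RETURN
 * set, which keeps binder_has_thread_work()/binder_has_proc_work() true so
 * the first ioctl returns to userspace instead of blocking.
 */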
2574 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
2576 struct binder_thread *thread = NULL;
2577 struct rb_node *parent = NULL;
2578 struct rb_node **p = &proc->threads.rb_node;
2582 thread = rb_entry(parent, struct binder_thread, rb_node);
2584 if (current->pid < thread->pid)
2586 else if (current->pid > thread->pid)
2587 p = &(*p)->rb_right;
2592 thread = kzalloc(sizeof(*thread), GFP_KERNEL);
2595 binder_stats_created(BINDER_STAT_THREAD);
2596 thread->proc = proc;
2597 thread->pid = current->pid;
2598 init_waitqueue_head(&thread->wait);
2599 INIT_LIST_HEAD(&thread->todo);
2600 rb_link_node(&thread->rb_node, parent, p);
2601 rb_insert_color(&thread->rb_node, &proc->threads);
2602 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
2603 thread->return_error = BR_OK;
2604 thread->return_error2 = BR_OK;
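/*
 * binder_free_thread() - tear down a binder_thread on BINDER_THREAD_EXIT
 * or process release.  Transaction stack entries that still reference the
 * thread are unlinked, a pending reply (if any) is failed with
 * BR_DEAD_REPLY, queued work is released, and a thread that was used with
 * poll() has its waitqueue torn down via wake_up_pollfree() first.
 * Returns the number of transactions that were still active.
 */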
2609 static int binder_free_thread(struct binder_proc *proc,
2610 struct binder_thread *thread)
2612 struct binder_transaction *t;
2613 struct binder_transaction *send_reply = NULL;
2614 int active_transactions = 0;
2616 rb_erase(&thread->rb_node, &proc->threads);
2617 t = thread->transaction_stack;
2618 if (t && t->to_thread == thread)
2621 active_transactions++;
2622 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2623 "release %d:%d transaction %d %s, still active\n",
2624 proc->pid, thread->pid,
2626 (t->to_thread == thread) ? "in" : "out");
2628 if (t->to_thread == thread) {
2630 t->to_thread = NULL;
2632 t->buffer->transaction = NULL;
2636 } else if (t->from == thread) {
2644 * If this thread used poll, make sure we remove the waitqueue from any
2645 * poll data structures holding it.
2647 if (thread->looper & BINDER_LOOPER_STATE_POLL)
2648 wake_up_pollfree(&thread->wait);
2651 * This is needed to avoid races between wake_up_pollfree() above and
2652 * someone else removing the last entry from the queue for other reasons
2653 * (e.g. ep_remove_wait_queue() being called due to an epoll file
2654 * descriptor being closed). Such other users hold an RCU read lock, so
2655 * we can be sure they're done after we call synchronize_rcu().
2657 if (thread->looper & BINDER_LOOPER_STATE_POLL)
2661 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
2662 binder_release_work(&thread->todo);
2664 binder_stats_deleted(BINDER_STAT_THREAD);
2665 return active_transactions;
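/*
 * binder_poll() - poll/epoll support for /dev/binder.  Reports POLLIN when
 * the calling thread (or, for an idle thread, its process) already has
 * work queued that a subsequent BINDER_WRITE_READ would return.
 */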
2668 static unsigned int binder_poll(struct file *filp,
2669 struct poll_table_struct *wait)
2671 struct binder_proc *proc = filp->private_data;
2672 struct binder_thread *thread = NULL;
2673 int wait_for_proc_work;
2675 binder_lock(__func__);
2677 thread = binder_get_thread(proc);
2679 binder_unlock(__func__);
2683 thread->looper |= BINDER_LOOPER_STATE_POLL;
2685 wait_for_proc_work = thread->transaction_stack == NULL &&
2686 list_empty(&thread->todo) && thread->return_error == BR_OK;
2688 binder_unlock(__func__);
2690 if (wait_for_proc_work) {
2691 if (binder_has_proc_work(proc, thread))
2692 return POLLIN;
2693 poll_wait(filp, &proc->wait, wait);
2694 if (binder_has_proc_work(proc, thread))
2695 return POLLIN;
2696 } else {
2697 if (binder_has_thread_work(thread))
2698 return POLLIN;
2699 poll_wait(filp, &thread->wait, wait);
2700 if (binder_has_thread_work(thread))
2701 return POLLIN;
2702 }
2703 return 0;
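/*
 * binder_ioctl_write_read() - handle the BINDER_WRITE_READ ioctl.  The
 * struct binder_write_read passed from userspace describes an optional
 * write buffer of BC_* commands and an optional read buffer for BR_*
 * returns; both consumed counters are copied back so the caller can resume
 * a partially processed buffer.  An illustrative (not part of this driver)
 * userspace call, assuming 'fd' is an open /dev/binder descriptor and
 * 'bwr' has been filled in:
 *
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 */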
2706 static int binder_ioctl_write_read(struct file *filp,
2707 unsigned int cmd, unsigned long arg,
2708 struct binder_thread *thread)
2711 struct binder_proc *proc = filp->private_data;
2712 unsigned int size = _IOC_SIZE(cmd);
2713 void __user *ubuf = (void __user *)arg;
2714 struct binder_write_read bwr;
2716 if (size != sizeof(struct binder_write_read)) {
2720 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
2724 binder_debug(BINDER_DEBUG_READ_WRITE,
2725 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
2726 proc->pid, thread->pid,
2727 (u64)bwr.write_size, (u64)bwr.write_buffer,
2728 (u64)bwr.read_size, (u64)bwr.read_buffer);
2730 if (bwr.write_size > 0) {
2731 ret = binder_thread_write(proc, thread,
2732 bwr.write_buffer,
2733 bwr.write_size,
2734 &bwr.write_consumed);
2735 trace_binder_write_done(ret);
2737 bwr.read_consumed = 0;
2738 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
2743 if (bwr.read_size > 0) {
2744 ret = binder_thread_read(proc, thread, bwr.read_buffer,
2745 bwr.read_size,
2746 &bwr.read_consumed,
2747 filp->f_flags & O_NONBLOCK);
2748 trace_binder_read_done(ret);
2749 if (!list_empty(&proc->todo))
2750 wake_up_interruptible(&proc->wait);
2752 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
2757 binder_debug(BINDER_DEBUG_READ_WRITE,
2758 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
2759 proc->pid, thread->pid,
2760 (u64)bwr.write_consumed, (u64)bwr.write_size,
2761 (u64)bwr.read_consumed, (u64)bwr.read_size);
2762 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
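/*
 * binder_ioctl_set_ctx_mgr() - handle BINDER_SET_CONTEXT_MGR.  Registers
 * the calling process as the context manager (handle 0, normally
 * servicemanager) after checking security_binder_set_context_mgr() and,
 * if a manager uid was already recorded, that the caller's euid matches
 * it.  Only one context manager node may exist at a time.
 */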
2770 static int binder_ioctl_set_ctx_mgr(struct file *filp)
2773 struct binder_proc *proc = filp->private_data;
2774 kuid_t curr_euid = current_euid();
2776 if (binder_context_mgr_node != NULL) {
2777 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
2781 ret = security_binder_set_context_mgr(proc->cred);
2784 if (uid_valid(binder_context_mgr_uid)) {
2785 if (!uid_eq(binder_context_mgr_uid, curr_euid)) {
2786 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
2787 from_kuid(&init_user_ns, curr_euid),
2788 from_kuid(&init_user_ns,
2789 binder_context_mgr_uid));
2794 binder_context_mgr_uid = curr_euid;
2796 binder_context_mgr_node = binder_new_node(proc, 0, 0);
2797 if (binder_context_mgr_node == NULL) {
2801 binder_context_mgr_node->local_weak_refs++;
2802 binder_context_mgr_node->local_strong_refs++;
2803 binder_context_mgr_node->has_strong_ref = 1;
2804 binder_context_mgr_node->has_weak_ref = 1;
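/*
 * binder_ioctl() - top-level ioctl dispatcher for /dev/binder.  Looks up
 * (or creates) the binder_thread for the calling task under the global
 * binder lock, then handles BINDER_WRITE_READ, BINDER_SET_MAX_THREADS,
 * BINDER_SET_CONTEXT_MGR, BINDER_THREAD_EXIT and BINDER_VERSION.
 */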
2809 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2812 struct binder_proc *proc = filp->private_data;
2813 struct binder_thread *thread;
2814 unsigned int size = _IOC_SIZE(cmd);
2815 void __user *ubuf = (void __user *)arg;
2817 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
2818 proc->pid, current->pid, cmd, arg);*/
2820 trace_binder_ioctl(cmd, arg);
2822 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
2826 binder_lock(__func__);
2827 thread = binder_get_thread(proc);
2828 if (thread == NULL) {
2834 case BINDER_WRITE_READ:
2835 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
2839 case BINDER_SET_MAX_THREADS:
2840 if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
2845 case BINDER_SET_CONTEXT_MGR:
2846 ret = binder_ioctl_set_ctx_mgr(filp);
2850 case BINDER_THREAD_EXIT:
2851 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
2852 proc->pid, thread->pid);
2853 binder_free_thread(proc, thread);
2856 case BINDER_VERSION: {
2857 struct binder_version __user *ver = ubuf;
2859 if (size != sizeof(struct binder_version)) {
2863 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
2864 &ver->protocol_version)) {
2877 thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
2878 binder_unlock(__func__);
2879 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
2880 if (ret && ret != -ERESTARTSYS)
2881 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
2883 trace_binder_ioctl_done(ret);
2887 static void binder_vma_open(struct vm_area_struct *vma)
2889 struct binder_proc *proc = vma->vm_private_data;
2891 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2892 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2893 proc->pid, vma->vm_start, vma->vm_end,
2894 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2895 (unsigned long)pgprot_val(vma->vm_page_prot));
2898 static void binder_vma_close(struct vm_area_struct *vma)
2900 struct binder_proc *proc = vma->vm_private_data;
2902 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2903 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2904 proc->pid, vma->vm_start, vma->vm_end,
2905 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2906 (unsigned long)pgprot_val(vma->vm_page_prot));
2908 proc->vma_vm_mm = NULL;
2909 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
2912 static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2914 return VM_FAULT_SIGBUS;
2917 static const struct vm_operations_struct binder_vm_ops = {
2918 .open = binder_vma_open,
2919 .close = binder_vma_close,
2920 .fault = binder_vm_fault,
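/*
 * binder_mmap() - set up the single buffer mapping for a binder_proc.
 * Userspace maps /dev/binder read-only (at most SZ_4M); the driver
 * allocates a matching kernel vm area so each transaction buffer is
 * visible at a fixed offset (user_buffer_offset) in both address spaces,
 * and populates one initial page to seed the free-buffer list.
 */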
2923 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
2926 struct vm_struct *area;
2927 struct binder_proc *proc = filp->private_data;
2928 const char *failure_string;
2929 struct binder_buffer *buffer;
2931 if (proc->tsk != current->group_leader)
2934 if ((vma->vm_end - vma->vm_start) > SZ_4M)
2935 vma->vm_end = vma->vm_start + SZ_4M;
2937 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2938 "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
2939 proc->pid, vma->vm_start, vma->vm_end,
2940 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2941 (unsigned long)pgprot_val(vma->vm_page_prot));
2943 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
2945 failure_string = "bad vm_flags";
2948 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
2950 mutex_lock(&binder_mmap_lock);
2953 failure_string = "already mapped";
2954 goto err_already_mapped;
2957 area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
2960 failure_string = "get_vm_area";
2961 goto err_get_vm_area_failed;
2963 proc->buffer = area->addr;
2964 proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
2965 mutex_unlock(&binder_mmap_lock);
2967 #ifdef CONFIG_CPU_CACHE_VIPT
2968 if (cache_is_vipt_aliasing()) {
2969 while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
2970 pr_info("binder_mmap: %d %lx-%lx maps %pK bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
2971 vma->vm_start += PAGE_SIZE;
2975 proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
2976 if (proc->pages == NULL) {
2978 failure_string = "alloc page array";
2979 goto err_alloc_pages_failed;
2981 proc->buffer_size = vma->vm_end - vma->vm_start;
2983 vma->vm_ops = &binder_vm_ops;
2984 vma->vm_private_data = proc;
2986 if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
2988 failure_string = "alloc small buf";
2989 goto err_alloc_small_buf_failed;
2991 buffer = proc->buffer;
2992 INIT_LIST_HEAD(&proc->buffers);
2993 list_add(&buffer->entry, &proc->buffers);
2995 binder_insert_free_buffer(proc, buffer);
2996 proc->free_async_space = proc->buffer_size / 2;
2998 mutex_lock(&proc->files_lock);
2999 proc->files = get_files_struct(current);
3000 mutex_unlock(&proc->files_lock);
3002 proc->vma_vm_mm = vma->vm_mm;
3004 /*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
3005 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
3008 err_alloc_small_buf_failed:
3011 err_alloc_pages_failed:
3012 mutex_lock(&binder_mmap_lock);
3013 vfree(proc->buffer);
3014 proc->buffer = NULL;
3015 err_get_vm_area_failed:
3017 mutex_unlock(&binder_mmap_lock);
3019 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
3020 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
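/*
 * binder_open() - create the per-process binder_proc state when a process
 * opens /dev/binder.  The group leader's task_struct and the opener's
 * credentials are pinned, the todo and delivered_death lists are
 * initialised, the proc is added to the global binder_procs list, and a
 * per-pid debugfs entry is created.
 */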
3024 static int binder_open(struct inode *nodp, struct file *filp)
3026 struct binder_proc *proc;
3028 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
3029 current->group_leader->pid, current->pid);
3031 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
3034 get_task_struct(current->group_leader);
3035 proc->tsk = current->group_leader;
3036 mutex_init(&proc->files_lock);
3037 proc->cred = get_cred(filp->f_cred);
3038 INIT_LIST_HEAD(&proc->todo);
3039 init_waitqueue_head(&proc->wait);
3040 proc->default_priority = task_nice(current);
3042 binder_lock(__func__);
3044 binder_stats_created(BINDER_STAT_PROC);
3045 hlist_add_head(&proc->proc_node, &binder_procs);
3046 proc->pid = current->group_leader->pid;
3047 INIT_LIST_HEAD(&proc->delivered_death);
3048 filp->private_data = proc;
3050 binder_unlock(__func__);
3052 if (binder_debugfs_dir_entry_proc) {
3055 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
3056 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
3057 binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
3063 static int binder_flush(struct file *filp, fl_owner_t id)
3065 struct binder_proc *proc = filp->private_data;
3067 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
3072 static void binder_deferred_flush(struct binder_proc *proc)
3077 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
3078 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
3080 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
3081 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
3082 wake_up_interruptible(&thread->wait);
3086 wake_up_interruptible_all(&proc->wait);
3088 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3089 "binder_flush: %d woke %d threads\n", proc->pid,
3093 static int binder_release(struct inode *nodp, struct file *filp)
3095 struct binder_proc *proc = filp->private_data;
3097 debugfs_remove(proc->debugfs_entry);
3098 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
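/*
 * binder_node_release() - called during process teardown for each node the
 * process still owns.  Nodes without remote references are freed
 * immediately; otherwise the node is moved to the global dead-nodes list
 * and a BINDER_WORK_DEAD_BINDER item is queued for every reference that
 * requested a death notification.  Returns the updated incoming-reference
 * count, which the caller accumulates for its debug output.
 */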
3103 static int binder_node_release(struct binder_node *node, int refs)
3105 struct binder_ref *ref;
3108 list_del_init(&node->work.entry);
3109 binder_release_work(&node->async_todo);
3111 if (hlist_empty(&node->refs)) {
3113 binder_stats_deleted(BINDER_STAT_NODE);
3119 node->local_strong_refs = 0;
3120 node->local_weak_refs = 0;
3121 hlist_add_head(&node->dead_node, &binder_dead_nodes);
3123 hlist_for_each_entry(ref, &node->refs, node_entry) {
3131 if (list_empty(&ref->death->work.entry)) {
3132 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3133 list_add_tail(&ref->death->work.entry,
3134 &ref->proc->todo);
3135 wake_up_interruptible(&ref->proc->wait);
3140 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3141 "node %d now dead, refs %d, death %d\n",
3142 node->debug_id, refs, death);
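/*
 * binder_deferred_release() - final teardown of a binder_proc, run from
 * the deferred workqueue after the last file reference is dropped.  Frees
 * threads, nodes, references, queued work and any still-allocated
 * transaction buffers, releases the mapped pages and buffer area, and
 * drops the task and credential references taken in binder_open().
 */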
3147 static void binder_deferred_release(struct binder_proc *proc)
3149 struct binder_transaction *t;
3151 int threads, nodes, incoming_refs, outgoing_refs, buffers,
3152 active_transactions, page_count;
3155 BUG_ON(proc->files);
3157 hlist_del(&proc->proc_node);
3159 if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
3160 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3161 "%s: %d context_mgr_node gone\n",
3162 __func__, proc->pid);
3163 binder_context_mgr_node = NULL;
3167 active_transactions = 0;
3168 while ((n = rb_first(&proc->threads))) {
3169 struct binder_thread *thread;
3171 thread = rb_entry(n, struct binder_thread, rb_node);
3173 active_transactions += binder_free_thread(proc, thread);
3178 while ((n = rb_first(&proc->nodes))) {
3179 struct binder_node *node;
3181 node = rb_entry(n, struct binder_node, rb_node);
3183 rb_erase(&node->rb_node, &proc->nodes);
3184 incoming_refs = binder_node_release(node, incoming_refs);
3188 while ((n = rb_first(&proc->refs_by_desc))) {
3189 struct binder_ref *ref;
3191 ref = rb_entry(n, struct binder_ref, rb_node_desc);
3193 binder_delete_ref(ref);
3196 binder_release_work(&proc->todo);
3197 binder_release_work(&proc->delivered_death);
3200 while ((n = rb_first(&proc->allocated_buffers))) {
3201 struct binder_buffer *buffer;
3203 buffer = rb_entry(n, struct binder_buffer, rb_node);
3205 t = buffer->transaction;
3208 buffer->transaction = NULL;
3209 pr_err("release proc %d, transaction %d, not freed\n",
3210 proc->pid, t->debug_id);
3214 binder_free_buf(proc, buffer);
3218 binder_stats_deleted(BINDER_STAT_PROC);
3224 for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
3227 if (!proc->pages[i])
3230 page_addr = proc->buffer + i * PAGE_SIZE;
3231 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
3232 "%s: %d: page %d at %pK not freed\n",
3233 __func__, proc->pid, i, page_addr);
3234 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
3235 __free_page(proc->pages[i]);
3239 vfree(proc->buffer);
3242 put_task_struct(proc->tsk);
3243 put_cred(proc->cred);
3245 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3246 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
3247 __func__, proc->pid, threads, nodes, incoming_refs,
3248 outgoing_refs, active_transactions, buffers, page_count);
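/*
 * Deferred work handling: binder_defer_work() queues a binder_proc on
 * binder_deferred_list and schedules binder_deferred_work; this function
 * then processes one proc at a time under the global binder lock, acting
 * on BINDER_DEFERRED_PUT_FILES, BINDER_DEFERRED_FLUSH and
 * BINDER_DEFERRED_RELEASE outside of the file operations that requested
 * them.
 */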
3253 static void binder_deferred_func(struct work_struct *work)
3255 struct binder_proc *proc;
3256 struct files_struct *files;
3261 binder_lock(__func__);
3262 mutex_lock(&binder_deferred_lock);
3263 if (!hlist_empty(&binder_deferred_list)) {
3264 proc = hlist_entry(binder_deferred_list.first,
3265 struct binder_proc, deferred_work_node);
3266 hlist_del_init(&proc->deferred_work_node);
3267 defer = proc->deferred_work;
3268 proc->deferred_work = 0;
3273 mutex_unlock(&binder_deferred_lock);
3276 if (defer & BINDER_DEFERRED_PUT_FILES) {
3277 mutex_lock(&proc->files_lock);
3278 files = proc->files;
3281 mutex_unlock(&proc->files_lock);
3284 if (defer & BINDER_DEFERRED_FLUSH)
3285 binder_deferred_flush(proc);
3287 if (defer & BINDER_DEFERRED_RELEASE)
3288 binder_deferred_release(proc); /* frees proc */
3290 binder_unlock(__func__);
3292 put_files_struct(files);
3295 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
3298 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
3300 mutex_lock(&binder_deferred_lock);
3301 proc->deferred_work |= defer;
3302 if (hlist_unhashed(&proc->deferred_work_node)) {
3303 hlist_add_head(&proc->deferred_work_node,
3304 &binder_deferred_list);
3305 schedule_work(&binder_deferred_work);
3307 mutex_unlock(&binder_deferred_lock);
3310 static void print_binder_transaction(struct seq_file *m, const char *prefix,
3311 struct binder_transaction *t)
3314 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
3315 prefix, t->debug_id, t,
3316 t->from ? t->from->proc->pid : 0,
3317 t->from ? t->from->pid : 0,
3318 t->to_proc ? t->to_proc->pid : 0,
3319 t->to_thread ? t->to_thread->pid : 0,
3320 t->code, t->flags, t->priority, t->need_reply);
3321 if (t->buffer == NULL) {
3322 seq_puts(m, " buffer free\n");
3325 if (t->buffer->target_node)
3326 seq_printf(m, " node %d",
3327 t->buffer->target_node->debug_id);
3328 seq_printf(m, " size %zd:%zd data %pK\n",
3329 t->buffer->data_size, t->buffer->offsets_size,
3333 static void print_binder_buffer(struct seq_file *m, const char *prefix,
3334 struct binder_buffer *buffer)
3336 seq_printf(m, "%s %d: %pK size %zd:%zd %s\n",
3337 prefix, buffer->debug_id, buffer->data,
3338 buffer->data_size, buffer->offsets_size,
3339 buffer->transaction ? "active" : "delivered");
3342 static void print_binder_work(struct seq_file *m, const char *prefix,
3343 const char *transaction_prefix,
3344 struct binder_work *w)
3346 struct binder_node *node;
3347 struct binder_transaction *t;
3350 case BINDER_WORK_TRANSACTION:
3351 t = container_of(w, struct binder_transaction, work);
3352 print_binder_transaction(m, transaction_prefix, t);
3354 case BINDER_WORK_TRANSACTION_COMPLETE:
3355 seq_printf(m, "%stransaction complete\n", prefix);
3357 case BINDER_WORK_NODE:
3358 node = container_of(w, struct binder_node, work);
3359 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
3360 prefix, node->debug_id,
3361 (u64)node->ptr, (u64)node->cookie);
3363 case BINDER_WORK_DEAD_BINDER:
3364 seq_printf(m, "%shas dead binder\n", prefix);
3366 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3367 seq_printf(m, "%shas cleared dead binder\n", prefix);
3369 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
3370 seq_printf(m, "%shas cleared death notification\n", prefix);
3373 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
3378 static void print_binder_thread(struct seq_file *m,
3379 struct binder_thread *thread,
3382 struct binder_transaction *t;
3383 struct binder_work *w;
3384 size_t start_pos = m->count;
3387 seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
3388 header_pos = m->count;
3389 t = thread->transaction_stack;
3391 if (t->from == thread) {
3392 print_binder_transaction(m,
3393 " outgoing transaction", t);
3395 } else if (t->to_thread == thread) {
3396 print_binder_transaction(m,
3397 " incoming transaction", t);
3400 print_binder_transaction(m, " bad transaction", t);
3404 list_for_each_entry(w, &thread->todo, entry) {
3405 print_binder_work(m, " ", " pending transaction", w);
3407 if (!print_always && m->count == header_pos)
3408 m->count = start_pos;
3411 static void print_binder_node(struct seq_file *m, struct binder_node *node)
3413 struct binder_ref *ref;
3414 struct binder_work *w;
3418 hlist_for_each_entry(ref, &node->refs, node_entry)
3421 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
3422 node->debug_id, (u64)node->ptr, (u64)node->cookie,
3423 node->has_strong_ref, node->has_weak_ref,
3424 node->local_strong_refs, node->local_weak_refs,
3425 node->internal_strong_refs, count);
3427 seq_puts(m, " proc");
3428 hlist_for_each_entry(ref, &node->refs, node_entry)
3429 seq_printf(m, " %d", ref->proc->pid);
3432 list_for_each_entry(w, &node->async_todo, entry)
3433 print_binder_work(m, " ",
3434 " pending async transaction", w);
3437 static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
3439 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
3440 ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
3441 ref->node->debug_id, ref->strong, ref->weak, ref->death);
3444 static void print_binder_proc(struct seq_file *m,
3445 struct binder_proc *proc, int print_all)
3447 struct binder_work *w;
3449 size_t start_pos = m->count;
3452 seq_printf(m, "proc %d\n", proc->pid);
3453 header_pos = m->count;
3455 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3456 print_binder_thread(m, rb_entry(n, struct binder_thread,
3457 rb_node), print_all);
3458 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
3459 struct binder_node *node = rb_entry(n, struct binder_node,
3461 if (print_all || node->has_async_transaction)
3462 print_binder_node(m, node);
3465 for (n = rb_first(&proc->refs_by_desc);
3468 print_binder_ref(m, rb_entry(n, struct binder_ref,
3471 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
3472 print_binder_buffer(m, " buffer",
3473 rb_entry(n, struct binder_buffer, rb_node));
3474 list_for_each_entry(w, &proc->todo, entry)
3475 print_binder_work(m, " ", " pending transaction", w);
3476 list_for_each_entry(w, &proc->delivered_death, entry) {
3477 seq_puts(m, " has delivered dead binder\n");
3480 if (!print_all && m->count == header_pos)
3481 m->count = start_pos;
3484 static const char * const binder_return_strings[] = {
3489 "BR_ACQUIRE_RESULT",
3491 "BR_TRANSACTION_COMPLETE",
3496 "BR_ATTEMPT_ACQUIRE",
3501 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
3505 static const char * const binder_command_strings[] = {
3508 "BC_ACQUIRE_RESULT",
3516 "BC_ATTEMPT_ACQUIRE",
3517 "BC_REGISTER_LOOPER",
3520 "BC_REQUEST_DEATH_NOTIFICATION",
3521 "BC_CLEAR_DEATH_NOTIFICATION",
3522 "BC_DEAD_BINDER_DONE"
3525 static const char * const binder_objstat_strings[] = {
3532 "transaction_complete"
3535 static void print_binder_stats(struct seq_file *m, const char *prefix,
3536 struct binder_stats *stats)
3540 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
3541 ARRAY_SIZE(binder_command_strings));
3542 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
3544 seq_printf(m, "%s%s: %d\n", prefix,
3545 binder_command_strings[i], stats->bc[i]);
3548 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
3549 ARRAY_SIZE(binder_return_strings));
3550 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
3552 seq_printf(m, "%s%s: %d\n", prefix,
3553 binder_return_strings[i], stats->br[i]);
3556 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
3557 ARRAY_SIZE(binder_objstat_strings));
3558 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
3559 ARRAY_SIZE(stats->obj_deleted));
3560 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
3561 if (stats->obj_created[i] || stats->obj_deleted[i])
3562 seq_printf(m, "%s%s: active %d total %d\n", prefix,
3563 binder_objstat_strings[i],
3564 stats->obj_created[i] - stats->obj_deleted[i],
3565 stats->obj_created[i]);
3569 static void print_binder_proc_stats(struct seq_file *m,
3570 struct binder_proc *proc)
3572 struct binder_work *w;
3574 int count, strong, weak;
3576 seq_printf(m, "proc %d\n", proc->pid);
3578 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3580 seq_printf(m, " threads: %d\n", count);
3581 seq_printf(m, " requested threads: %d+%d/%d\n"
3582 " ready threads %d\n"
3583 " free async space %zd\n", proc->requested_threads,
3584 proc->requested_threads_started, proc->max_threads,
3585 proc->ready_threads, proc->free_async_space);
3587 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
3589 seq_printf(m, " nodes: %d\n", count);
3593 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
3594 struct binder_ref *ref = rb_entry(n, struct binder_ref,
3597 strong += ref->strong;
3600 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
3603 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
3605 seq_printf(m, " buffers: %d\n", count);
3608 list_for_each_entry(w, &proc->todo, entry) {
3610 case BINDER_WORK_TRANSACTION:
3617 seq_printf(m, " pending transactions: %d\n", count);
3619 print_binder_stats(m, " ", &proc->stats);
3623 static int binder_state_show(struct seq_file *m, void *unused)
3625 struct binder_proc *proc;
3626 struct binder_node *node;
3627 int do_lock = !binder_debug_no_lock;
3630 binder_lock(__func__);
3632 seq_puts(m, "binder state:\n");
3634 if (!hlist_empty(&binder_dead_nodes))
3635 seq_puts(m, "dead nodes:\n");
3636 hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
3637 print_binder_node(m, node);
3639 hlist_for_each_entry(proc, &binder_procs, proc_node)
3640 print_binder_proc(m, proc, 1);
3642 binder_unlock(__func__);
3646 static int binder_stats_show(struct seq_file *m, void *unused)
3648 struct binder_proc *proc;
3649 int do_lock = !binder_debug_no_lock;
3652 binder_lock(__func__);
3654 seq_puts(m, "binder stats:\n");
3656 print_binder_stats(m, "", &binder_stats);
3658 hlist_for_each_entry(proc, &binder_procs, proc_node)
3659 print_binder_proc_stats(m, proc);
3661 binder_unlock(__func__);
3665 static int binder_transactions_show(struct seq_file *m, void *unused)
3667 struct binder_proc *proc;
3668 int do_lock = !binder_debug_no_lock;
3671 binder_lock(__func__);
3673 seq_puts(m, "binder transactions:\n");
3674 hlist_for_each_entry(proc, &binder_procs, proc_node)
3675 print_binder_proc(m, proc, 0);
3677 binder_unlock(__func__);
3681 static int binder_proc_show(struct seq_file *m, void *unused)
3683 struct binder_proc *itr;
3684 struct binder_proc *proc = m->private;
3685 int do_lock = !binder_debug_no_lock;
3686 bool valid_proc = false;
3689 binder_lock(__func__);
3691 hlist_for_each_entry(itr, &binder_procs, proc_node) {
3698 seq_puts(m, "binder proc state:\n");
3699 print_binder_proc(m, proc, 1);
3702 binder_unlock(__func__);
3706 static void print_binder_transaction_log_entry(struct seq_file *m,
3707 struct binder_transaction_log_entry *e)
3710 "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
3711 e->debug_id, (e->call_type == 2) ? "reply" :
3712 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
3713 e->from_thread, e->to_proc, e->to_thread, e->to_node,
3714 e->target_handle, e->data_size, e->offsets_size);
3717 static int binder_transaction_log_show(struct seq_file *m, void *unused)
3719 struct binder_transaction_log *log = m->private;
3723 for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
3724 print_binder_transaction_log_entry(m, &log->entry[i]);
3726 for (i = 0; i < log->next; i++)
3727 print_binder_transaction_log_entry(m, &log->entry[i]);
3731 static const struct file_operations binder_fops = {
3732 .owner = THIS_MODULE,
3733 .poll = binder_poll,
3734 .unlocked_ioctl = binder_ioctl,
3735 .compat_ioctl = binder_ioctl,
3736 .mmap = binder_mmap,
3737 .open = binder_open,
3738 .flush = binder_flush,
3739 .release = binder_release,
3742 static struct miscdevice binder_miscdev = {
3743 .minor = MISC_DYNAMIC_MINOR,
3745 .fops = &binder_fops
3748 BINDER_DEBUG_ENTRY(state);
3749 BINDER_DEBUG_ENTRY(stats);
3750 BINDER_DEBUG_ENTRY(transactions);
3751 BINDER_DEBUG_ENTRY(transaction_log);
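/*
 * binder_init() - module initialisation.  Registers the /dev/binder misc
 * device and, when debugfs is available, creates the binder directory with
 * the state, stats, transactions, transaction_log and
 * failed_transaction_log files plus a per-process "proc" subdirectory.
 */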
3753 static int __init binder_init(void)
3757 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
3758 if (binder_debugfs_dir_entry_root)
3759 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
3760 binder_debugfs_dir_entry_root);
3761 ret = misc_register(&binder_miscdev);
3762 if (binder_debugfs_dir_entry_root) {
3763 debugfs_create_file("state",
3764 S_IRUGO,
3765 binder_debugfs_dir_entry_root,
3766 NULL,
3767 &binder_state_fops);
3768 debugfs_create_file("stats",
3769 S_IRUGO,
3770 binder_debugfs_dir_entry_root,
3771 NULL,
3772 &binder_stats_fops);
3773 debugfs_create_file("transactions",
3774 S_IRUGO,
3775 binder_debugfs_dir_entry_root,
3776 NULL,
3777 &binder_transactions_fops);
3778 debugfs_create_file("transaction_log",
3779 S_IRUGO,
3780 binder_debugfs_dir_entry_root,
3781 &binder_transaction_log,
3782 &binder_transaction_log_fops);
3783 debugfs_create_file("failed_transaction_log",
3784 S_IRUGO,
3785 binder_debugfs_dir_entry_root,
3786 &binder_transaction_log_failed,
3787 &binder_transaction_log_fops);
3788 }
3789 return ret;
3792 device_initcall(binder_init);
3794 #define CREATE_TRACE_POINTS
3795 #include "binder_trace.h"
3797 MODULE_LICENSE("GPL v2");