GNU Linux-libre 5.4.241-gnu1
drivers/android/binder_alloc.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* binder_alloc.c
3  *
4  * Android IPC Subsystem
5  *
6  * Copyright (C) 2007-2017 Google, Inc.
7  */
8
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10
11 #include <linux/list.h>
12 #include <linux/sched/mm.h>
13 #include <linux/module.h>
14 #include <linux/rtmutex.h>
15 #include <linux/rbtree.h>
16 #include <linux/seq_file.h>
17 #include <linux/vmalloc.h>
18 #include <linux/slab.h>
19 #include <linux/sched.h>
20 #include <linux/list_lru.h>
21 #include <linux/ratelimit.h>
22 #include <asm/cacheflush.h>
23 #include <linux/uaccess.h>
24 #include <linux/highmem.h>
25 #include <linux/sizes.h>
26 #include "binder_alloc.h"
27 #include "binder_trace.h"
28
29 struct list_lru binder_alloc_lru;
30
31 static DEFINE_MUTEX(binder_alloc_mmap_lock);
32
33 enum {
34         BINDER_DEBUG_USER_ERROR             = 1U << 0,
35         BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
36         BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
37         BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
38 };
39 static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;
40
41 module_param_named(debug_mask, binder_alloc_debug_mask,
42                    uint, 0644);
43
44 #define binder_alloc_debug(mask, x...) \
45         do { \
46                 if (binder_alloc_debug_mask & mask) \
47                         pr_info_ratelimited(x); \
48         } while (0)
49
50 static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
51 {
52         return list_entry(buffer->entry.next, struct binder_buffer, entry);
53 }
54
55 static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
56 {
57         return list_entry(buffer->entry.prev, struct binder_buffer, entry);
58 }
59
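/*
 * binder_alloc_buffer_size() - size of the buffer's data area.
 *
 * Buffers are laid out back to back in the mapped region, so the size is
 * the distance from this buffer's user_data to the next buffer's, or to
 * the end of the mapped region for the last buffer in the list.
 */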
60 static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
61                                        struct binder_buffer *buffer)
62 {
63         if (list_is_last(&buffer->entry, &alloc->buffers))
64                 return alloc->buffer + alloc->buffer_size - buffer->user_data;
65         return binder_buffer_next(buffer)->user_data - buffer->user_data;
66 }
67
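/*
 * Insert a free buffer into alloc->free_buffers, an rbtree sorted by
 * buffer size so that allocation can do a best-fit lookup.
 */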
68 static void binder_insert_free_buffer(struct binder_alloc *alloc,
69                                       struct binder_buffer *new_buffer)
70 {
71         struct rb_node **p = &alloc->free_buffers.rb_node;
72         struct rb_node *parent = NULL;
73         struct binder_buffer *buffer;
74         size_t buffer_size;
75         size_t new_buffer_size;
76
77         BUG_ON(!new_buffer->free);
78
79         new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);
80
81         binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
82                      "%d: add free buffer, size %zd, at %pK\n",
83                       alloc->pid, new_buffer_size, new_buffer);
84
85         while (*p) {
86                 parent = *p;
87                 buffer = rb_entry(parent, struct binder_buffer, rb_node);
88                 BUG_ON(!buffer->free);
89
90                 buffer_size = binder_alloc_buffer_size(alloc, buffer);
91
92                 if (new_buffer_size < buffer_size)
93                         p = &parent->rb_left;
94                 else
95                         p = &parent->rb_right;
96         }
97         rb_link_node(&new_buffer->rb_node, parent, p);
98         rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
99 }
100
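/*
 * Insert an in-use buffer into alloc->allocated_buffers, an rbtree keyed
 * by the buffer's userspace address so it can be found again at free time.
 */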
101 static void binder_insert_allocated_buffer_locked(
102                 struct binder_alloc *alloc, struct binder_buffer *new_buffer)
103 {
104         struct rb_node **p = &alloc->allocated_buffers.rb_node;
105         struct rb_node *parent = NULL;
106         struct binder_buffer *buffer;
107
108         BUG_ON(new_buffer->free);
109
110         while (*p) {
111                 parent = *p;
112                 buffer = rb_entry(parent, struct binder_buffer, rb_node);
113                 BUG_ON(buffer->free);
114
115                 if (new_buffer->user_data < buffer->user_data)
116                         p = &parent->rb_left;
117                 else if (new_buffer->user_data > buffer->user_data)
118                         p = &parent->rb_right;
119                 else
120                         BUG();
121         }
122         rb_link_node(&new_buffer->rb_node, parent, p);
123         rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
124 }
125
126 static struct binder_buffer *binder_alloc_prepare_to_free_locked(
127                 struct binder_alloc *alloc,
128                 uintptr_t user_ptr)
129 {
130         struct rb_node *n = alloc->allocated_buffers.rb_node;
131         struct binder_buffer *buffer;
132         void __user *uptr;
133
134         uptr = (void __user *)user_ptr;
135
136         while (n) {
137                 buffer = rb_entry(n, struct binder_buffer, rb_node);
138                 BUG_ON(buffer->free);
139
140                 if (uptr < buffer->user_data)
141                         n = n->rb_left;
142                 else if (uptr > buffer->user_data)
143                         n = n->rb_right;
144                 else {
145                         /*
146                          * Guard against user threads attempting to
147                          * free the buffer when in use by kernel or
148                          * after it's already been freed.
149                          */
150                         if (!buffer->allow_user_free)
151                                 return ERR_PTR(-EPERM);
152                         buffer->allow_user_free = 0;
153                         return buffer;
154                 }
155         }
156         return NULL;
157 }
158
159 /**
160  * binder_alloc_prepare_to_free() - get buffer given user ptr
161  * @alloc:      binder_alloc for this proc
162  * @user_ptr:   User pointer to buffer data
163  *
164  * Validate the userspace pointer to buffer data and return the buffer that
165  * corresponds to it, by searching the allocated-buffers rbtree for a buffer
166  * whose user data pointer matches.
167  *
168  * Return:      Pointer to buffer, ERR_PTR(-EPERM) if the buffer may not be freed by userspace, or NULL if no matching buffer is found
169  */
170 struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
171                                                    uintptr_t user_ptr)
172 {
173         struct binder_buffer *buffer;
174
175         mutex_lock(&alloc->mutex);
176         buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
177         mutex_unlock(&alloc->mutex);
178         return buffer;
179 }
180
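/*
 * binder_update_page_range() - allocate or release the physical pages
 * backing the range [start, end) of the mapped buffer space.
 *
 * When allocating, pages already present are pulled back off the binder
 * LRU; missing pages are allocated and mapped into the process's vma with
 * vm_insert_page(). When freeing, pages are not released immediately but
 * parked on the LRU so the shrinker can reclaim them under memory pressure.
 */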
181 static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
182                                     void __user *start, void __user *end)
183 {
184         void __user *page_addr;
185         unsigned long user_page_addr;
186         struct binder_lru_page *page;
187         struct vm_area_struct *vma = NULL;
188         struct mm_struct *mm = NULL;
189         bool need_mm = false;
190
191         binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
192                      "%d: %s pages %pK-%pK\n", alloc->pid,
193                      allocate ? "allocate" : "free", start, end);
194
195         if (end <= start)
196                 return 0;
197
198         trace_binder_update_page_range(alloc, allocate, start, end);
199
200         if (allocate == 0)
201                 goto free_range;
202
203         for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
204                 page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
205                 if (!page->page_ptr) {
206                         need_mm = true;
207                         break;
208                 }
209         }
210
211         if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
212                 mm = alloc->vma_vm_mm;
213
214         if (mm) {
215                 down_write(&mm->mmap_sem);
216                 vma = alloc->vma;
217         }
218
219         if (!vma && need_mm) {
220                 binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
221                                    "%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
222                                    alloc->pid);
223                 goto err_no_vma;
224         }
225
226         for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
227                 int ret;
228                 bool on_lru;
229                 size_t index;
230
231                 index = (page_addr - alloc->buffer) / PAGE_SIZE;
232                 page = &alloc->pages[index];
233
234                 if (page->page_ptr) {
235                         trace_binder_alloc_lru_start(alloc, index);
236
237                         on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
238                         WARN_ON(!on_lru);
239
240                         trace_binder_alloc_lru_end(alloc, index);
241                         continue;
242                 }
243
244                 if (WARN_ON(!vma))
245                         goto err_page_ptr_cleared;
246
247                 trace_binder_alloc_page_start(alloc, index);
248                 page->page_ptr = alloc_page(GFP_KERNEL |
249                                             __GFP_HIGHMEM |
250                                             __GFP_ZERO);
251                 if (!page->page_ptr) {
252                         pr_err("%d: binder_alloc_buf failed for page at %pK\n",
253                                 alloc->pid, page_addr);
254                         goto err_alloc_page_failed;
255                 }
256                 page->alloc = alloc;
257                 INIT_LIST_HEAD(&page->lru);
258
259                 user_page_addr = (uintptr_t)page_addr;
260                 ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
261                 if (ret) {
262                         pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
263                                alloc->pid, user_page_addr);
264                         goto err_vm_insert_page_failed;
265                 }
266
267                 if (index + 1 > alloc->pages_high)
268                         alloc->pages_high = index + 1;
269
270                 trace_binder_alloc_page_end(alloc, index);
271                 /* vm_insert_page does not seem to increment the refcount */
272         }
273         if (mm) {
274                 up_write(&mm->mmap_sem);
275                 mmput(mm);
276         }
277         return 0;
278
279 free_range:
280         for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) {
281                 bool ret;
282                 size_t index;
283
284                 index = (page_addr - alloc->buffer) / PAGE_SIZE;
285                 page = &alloc->pages[index];
286
287                 trace_binder_free_lru_start(alloc, index);
288
289                 ret = list_lru_add(&binder_alloc_lru, &page->lru);
290                 WARN_ON(!ret);
291
292                 trace_binder_free_lru_end(alloc, index);
293                 if (page_addr == start)
294                         break;
295                 continue;
296
297 err_vm_insert_page_failed:
298                 __free_page(page->page_ptr);
299                 page->page_ptr = NULL;
300 err_alloc_page_failed:
301 err_page_ptr_cleared:
302                 if (page_addr == start)
303                         break;
304         }
305 err_no_vma:
306         if (mm) {
307                 up_write(&mm->mmap_sem);
308                 mmput(mm);
309         }
310         return vma ? -ENOMEM : -ESRCH;
311 }
312
313
314 static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
315                 struct vm_area_struct *vma)
316 {
317         if (vma)
318                 alloc->vma_vm_mm = vma->vm_mm;
319         /*
320           * If we see that alloc->vma is not NULL, the buffer data structures
321           * are set up completely; this pairs with the smp_rmb() in
322           * binder_alloc_get_vma(). We also want to guarantee that the new
323           * alloc->vma_vm_mm is always visible whenever alloc->vma is set.
324          */
325         smp_wmb();
326         alloc->vma = vma;
327 }
328
329 static inline struct vm_area_struct *binder_alloc_get_vma(
330                 struct binder_alloc *alloc)
331 {
332         struct vm_area_struct *vma = NULL;
333
334         if (alloc->vma) {
335                 /* Pairs with the smp_wmb() and comment in binder_alloc_set_vma() */
336                 smp_rmb();
337                 vma = alloc->vma;
338         }
339         return vma;
340 }
341
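/*
 * Allocate a buffer with alloc->mutex held: do a best-fit search of the
 * free rbtree, populate the backing pages, and split any unused tail of
 * the chosen buffer off as a new free buffer.
 */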
342 static struct binder_buffer *binder_alloc_new_buf_locked(
343                                 struct binder_alloc *alloc,
344                                 size_t data_size,
345                                 size_t offsets_size,
346                                 size_t extra_buffers_size,
347                                 int is_async)
348 {
349         struct rb_node *n = alloc->free_buffers.rb_node;
350         struct binder_buffer *buffer;
351         size_t buffer_size;
352         struct rb_node *best_fit = NULL;
353         void __user *has_page_addr;
354         void __user *end_page_addr;
355         size_t size, data_offsets_size;
356         int ret;
357
358         if (!binder_alloc_get_vma(alloc)) {
359                 binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
360                                    "%d: binder_alloc_buf, no vma\n",
361                                    alloc->pid);
362                 return ERR_PTR(-ESRCH);
363         }
364
365         data_offsets_size = ALIGN(data_size, sizeof(void *)) +
366                 ALIGN(offsets_size, sizeof(void *));
367
368         if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
369                 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
370                                 "%d: got transaction with invalid size %zd-%zd\n",
371                                 alloc->pid, data_size, offsets_size);
372                 return ERR_PTR(-EINVAL);
373         }
374         size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
375         if (size < data_offsets_size || size < extra_buffers_size) {
376                 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
377                                 "%d: got transaction with invalid extra_buffers_size %zd\n",
378                                 alloc->pid, extra_buffers_size);
379                 return ERR_PTR(-EINVAL);
380         }
381         if (is_async &&
382             alloc->free_async_space < size + sizeof(struct binder_buffer)) {
383                 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
384                              "%d: binder_alloc_buf size %zd failed, no async space left\n",
385                               alloc->pid, size);
386                 return ERR_PTR(-ENOSPC);
387         }
388
389         /* Pad 0-size buffers so they get assigned unique addresses */
390         size = max(size, sizeof(void *));
391
392         while (n) {
393                 buffer = rb_entry(n, struct binder_buffer, rb_node);
394                 BUG_ON(!buffer->free);
395                 buffer_size = binder_alloc_buffer_size(alloc, buffer);
396
397                 if (size < buffer_size) {
398                         best_fit = n;
399                         n = n->rb_left;
400                 } else if (size > buffer_size)
401                         n = n->rb_right;
402                 else {
403                         best_fit = n;
404                         break;
405                 }
406         }
407         if (best_fit == NULL) {
408                 size_t allocated_buffers = 0;
409                 size_t largest_alloc_size = 0;
410                 size_t total_alloc_size = 0;
411                 size_t free_buffers = 0;
412                 size_t largest_free_size = 0;
413                 size_t total_free_size = 0;
414
415                 for (n = rb_first(&alloc->allocated_buffers); n != NULL;
416                      n = rb_next(n)) {
417                         buffer = rb_entry(n, struct binder_buffer, rb_node);
418                         buffer_size = binder_alloc_buffer_size(alloc, buffer);
419                         allocated_buffers++;
420                         total_alloc_size += buffer_size;
421                         if (buffer_size > largest_alloc_size)
422                                 largest_alloc_size = buffer_size;
423                 }
424                 for (n = rb_first(&alloc->free_buffers); n != NULL;
425                      n = rb_next(n)) {
426                         buffer = rb_entry(n, struct binder_buffer, rb_node);
427                         buffer_size = binder_alloc_buffer_size(alloc, buffer);
428                         free_buffers++;
429                         total_free_size += buffer_size;
430                         if (buffer_size > largest_free_size)
431                                 largest_free_size = buffer_size;
432                 }
433                 binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
434                                    "%d: binder_alloc_buf size %zd failed, no address space\n",
435                                    alloc->pid, size);
436                 binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
437                                    "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
438                                    total_alloc_size, allocated_buffers,
439                                    largest_alloc_size, total_free_size,
440                                    free_buffers, largest_free_size);
441                 return ERR_PTR(-ENOSPC);
442         }
443         if (n == NULL) {
444                 buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
445                 buffer_size = binder_alloc_buffer_size(alloc, buffer);
446         }
447
448         binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
449                      "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
450                       alloc->pid, size, buffer, buffer_size);
451
452         has_page_addr = (void __user *)
453                 (((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
454         WARN_ON(n && buffer_size != size);
455         end_page_addr =
456                 (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
457         if (end_page_addr > has_page_addr)
458                 end_page_addr = has_page_addr;
459         ret = binder_update_page_range(alloc, 1, (void __user *)
460                 PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr);
461         if (ret)
462                 return ERR_PTR(ret);
463
464         if (buffer_size != size) {
465                 struct binder_buffer *new_buffer;
466
467                 new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
468                 if (!new_buffer) {
469                         pr_err("%s: %d failed to alloc new buffer struct\n",
470                                __func__, alloc->pid);
471                         goto err_alloc_buf_struct_failed;
472                 }
473                 new_buffer->user_data = (u8 __user *)buffer->user_data + size;
474                 list_add(&new_buffer->entry, &buffer->entry);
475                 new_buffer->free = 1;
476                 binder_insert_free_buffer(alloc, new_buffer);
477         }
478
479         rb_erase(best_fit, &alloc->free_buffers);
480         buffer->free = 0;
481         buffer->allow_user_free = 0;
482         binder_insert_allocated_buffer_locked(alloc, buffer);
483         binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
484                      "%d: binder_alloc_buf size %zd got %pK\n",
485                       alloc->pid, size, buffer);
486         buffer->data_size = data_size;
487         buffer->offsets_size = offsets_size;
488         buffer->async_transaction = is_async;
489         buffer->extra_buffers_size = extra_buffers_size;
490         if (is_async) {
491                 alloc->free_async_space -= size + sizeof(struct binder_buffer);
492                 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
493                              "%d: binder_alloc_buf size %zd async free %zd\n",
494                               alloc->pid, size, alloc->free_async_space);
495         }
496         return buffer;
497
498 err_alloc_buf_struct_failed:
499         binder_update_page_range(alloc, 0, (void __user *)
500                                  PAGE_ALIGN((uintptr_t)buffer->user_data),
501                                  end_page_addr);
502         return ERR_PTR(-ENOMEM);
503 }
504
505 /**
506  * binder_alloc_new_buf() - Allocate a new binder buffer
507  * @alloc:              binder_alloc for this proc
508  * @data_size:          size of user data buffer
509  * @offsets_size:       size of the array of object offsets supplied by the user
510  * @extra_buffers_size: size of extra space for meta-data (e.g., security context)
511  * @is_async:           buffer for async transaction
512  *
513  * Allocate a new buffer given the requested sizes. Returns
514  * the kernel version of the buffer pointer. The size allocated
515  * is the sum of the three given sizes (each rounded up to a
516  * pointer-sized boundary).
517  *
518  * Return:      The allocated buffer, or an ERR_PTR() encoding the error on failure
519  */
520 struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
521                                            size_t data_size,
522                                            size_t offsets_size,
523                                            size_t extra_buffers_size,
524                                            int is_async)
525 {
526         struct binder_buffer *buffer;
527
528         mutex_lock(&alloc->mutex);
529         buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
530                                              extra_buffers_size, is_async);
531         mutex_unlock(&alloc->mutex);
532         return buffer;
533 }
534
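/*
 * Page helpers used when freeing buffers: a backing page may only be
 * released if it is not shared with the previous or next buffer.
 */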
535 static void __user *buffer_start_page(struct binder_buffer *buffer)
536 {
537         return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK);
538 }
539
540 static void __user *prev_buffer_end_page(struct binder_buffer *buffer)
541 {
542         return (void __user *)
543                 (((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK);
544 }
545
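/*
 * Unlink a free buffer and kfree() its metadata, releasing its first page
 * to the LRU only if that page is not shared with an adjacent buffer.
 */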
546 static void binder_delete_free_buffer(struct binder_alloc *alloc,
547                                       struct binder_buffer *buffer)
548 {
549         struct binder_buffer *prev, *next = NULL;
550         bool to_free = true;
551         BUG_ON(alloc->buffers.next == &buffer->entry);
552         prev = binder_buffer_prev(buffer);
553         BUG_ON(!prev->free);
554         if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
555                 to_free = false;
556                 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
557                                    "%d: merge free, buffer %pK share page with %pK\n",
558                                    alloc->pid, buffer->user_data,
559                                    prev->user_data);
560         }
561
562         if (!list_is_last(&buffer->entry, &alloc->buffers)) {
563                 next = binder_buffer_next(buffer);
564                 if (buffer_start_page(next) == buffer_start_page(buffer)) {
565                         to_free = false;
566                         binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
567                                            "%d: merge free, buffer %pK share page with %pK\n",
568                                            alloc->pid,
569                                            buffer->user_data,
570                                            next->user_data);
571                 }
572         }
573
574         if (PAGE_ALIGNED(buffer->user_data)) {
575                 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
576                                    "%d: merge free, buffer start %pK is page aligned\n",
577                                    alloc->pid, buffer->user_data);
578                 to_free = false;
579         }
580
581         if (to_free) {
582                 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
583                                    "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
584                                    alloc->pid, buffer->user_data,
585                                    prev->user_data,
586                                    next ? next->user_data : NULL);
587                 binder_update_page_range(alloc, 0, buffer_start_page(buffer),
588                                          buffer_start_page(buffer) + PAGE_SIZE);
589         }
590         list_del(&buffer->entry);
591         kfree(buffer);
592 }
593
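/*
 * Return a buffer to the free tree: release its fully-covered backing
 * pages to the LRU and coalesce it with neighbouring free buffers before
 * reinserting it into alloc->free_buffers.
 */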
594 static void binder_free_buf_locked(struct binder_alloc *alloc,
595                                    struct binder_buffer *buffer)
596 {
597         size_t size, buffer_size;
598
599         buffer_size = binder_alloc_buffer_size(alloc, buffer);
600
601         size = ALIGN(buffer->data_size, sizeof(void *)) +
602                 ALIGN(buffer->offsets_size, sizeof(void *)) +
603                 ALIGN(buffer->extra_buffers_size, sizeof(void *));
604
605         binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
606                      "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
607                       alloc->pid, buffer, size, buffer_size);
608
609         BUG_ON(buffer->free);
610         BUG_ON(size > buffer_size);
611         BUG_ON(buffer->transaction != NULL);
612         BUG_ON(buffer->user_data < alloc->buffer);
613         BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
614
615         if (buffer->async_transaction) {
616                 alloc->free_async_space += buffer_size + sizeof(struct binder_buffer);
617
618                 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
619                              "%d: binder_free_buf size %zd async free %zd\n",
620                               alloc->pid, size, alloc->free_async_space);
621         }
622
623         binder_update_page_range(alloc, 0,
624                 (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data),
625                 (void __user *)(((uintptr_t)
626                           buffer->user_data + buffer_size) & PAGE_MASK));
627
628         rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
629         buffer->free = 1;
630         if (!list_is_last(&buffer->entry, &alloc->buffers)) {
631                 struct binder_buffer *next = binder_buffer_next(buffer);
632
633                 if (next->free) {
634                         rb_erase(&next->rb_node, &alloc->free_buffers);
635                         binder_delete_free_buffer(alloc, next);
636                 }
637         }
638         if (alloc->buffers.next != &buffer->entry) {
639                 struct binder_buffer *prev = binder_buffer_prev(buffer);
640
641                 if (prev->free) {
642                         binder_delete_free_buffer(alloc, buffer);
643                         rb_erase(&prev->rb_node, &alloc->free_buffers);
644                         buffer = prev;
645                 }
646         }
647         binder_insert_free_buffer(alloc, buffer);
648 }
649
650 static void binder_alloc_clear_buf(struct binder_alloc *alloc,
651                                    struct binder_buffer *buffer);
652 /**
653  * binder_alloc_free_buf() - free a binder buffer
654  * @alloc:      binder_alloc for this proc
655  * @buffer:     kernel pointer to buffer
656  *
657  * Free the buffer allocated via binder_alloc_new_buf()
658  */
659 void binder_alloc_free_buf(struct binder_alloc *alloc,
660                             struct binder_buffer *buffer)
661 {
662         /*
663          * We could eliminate the call to binder_alloc_clear_buf()
664          * from binder_alloc_deferred_release() by moving this to
665          * binder_alloc_free_buf_locked(). However, that could
666          * increase contention for the alloc mutex if clear_on_free
667          * is used frequently for large buffers. The mutex is not
668          * needed for correctness here.
669          */
670         if (buffer->clear_on_free) {
671                 binder_alloc_clear_buf(alloc, buffer);
672                 buffer->clear_on_free = false;
673         }
674         mutex_lock(&alloc->mutex);
675         binder_free_buf_locked(alloc, buffer);
676         mutex_unlock(&alloc->mutex);
677 }
678
679 /**
680  * binder_alloc_mmap_handler() - map virtual address space for proc
681  * @alloc:      alloc structure for this proc
682  * @vma:        vma passed to mmap()
683  *
684  * Called by binder_mmap() to initialize the space specified in
685  * vma for allocating binder buffers
686  *
687  * Return:
688  *      0 = success
689  *      -EBUSY = address space already mapped
690  *      -ENOMEM = failed to map memory to given address space
691  */
692 int binder_alloc_mmap_handler(struct binder_alloc *alloc,
693                               struct vm_area_struct *vma)
694 {
695         int ret;
696         const char *failure_string;
697         struct binder_buffer *buffer;
698
699         mutex_lock(&binder_alloc_mmap_lock);
700         if (alloc->buffer_size) {
701                 ret = -EBUSY;
702                 failure_string = "already mapped";
703                 goto err_already_mapped;
704         }
705         alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
706                                    SZ_4M);
707         mutex_unlock(&binder_alloc_mmap_lock);
708
709         alloc->buffer = (void __user *)vma->vm_start;
710
711         alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
712                                sizeof(alloc->pages[0]),
713                                GFP_KERNEL);
714         if (alloc->pages == NULL) {
715                 ret = -ENOMEM;
716                 failure_string = "alloc page array";
717                 goto err_alloc_pages_failed;
718         }
719
720         buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
721         if (!buffer) {
722                 ret = -ENOMEM;
723                 failure_string = "alloc buffer struct";
724                 goto err_alloc_buf_struct_failed;
725         }
726
727         buffer->user_data = alloc->buffer;
728         list_add(&buffer->entry, &alloc->buffers);
729         buffer->free = 1;
730         binder_insert_free_buffer(alloc, buffer);
731         alloc->free_async_space = alloc->buffer_size / 2;
732         binder_alloc_set_vma(alloc, vma);
733         mmgrab(alloc->vma_vm_mm);
734
735         return 0;
736
737 err_alloc_buf_struct_failed:
738         kfree(alloc->pages);
739         alloc->pages = NULL;
740 err_alloc_pages_failed:
741         alloc->buffer = NULL;
742         mutex_lock(&binder_alloc_mmap_lock);
743         alloc->buffer_size = 0;
744 err_already_mapped:
745         mutex_unlock(&binder_alloc_mmap_lock);
746         binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
747                            "%s: %d %lx-%lx %s failed %d\n", __func__,
748                            alloc->pid, vma->vm_start, vma->vm_end,
749                            failure_string, ret);
750         return ret;
751 }
752
753
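/**
 * binder_alloc_deferred_release() - free the buffer space of a dying proc
 * @alloc: binder_alloc for this proc
 *
 * Frees every remaining buffer and backing page. Called only after the
 * vma has been closed (alloc->vma must already be NULL), so no new
 * allocations can race with the teardown.
 */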
754 void binder_alloc_deferred_release(struct binder_alloc *alloc)
755 {
756         struct rb_node *n;
757         int buffers, page_count;
758         struct binder_buffer *buffer;
759
760         buffers = 0;
761         mutex_lock(&alloc->mutex);
762         BUG_ON(alloc->vma);
763
764         while ((n = rb_first(&alloc->allocated_buffers))) {
765                 buffer = rb_entry(n, struct binder_buffer, rb_node);
766
767                 /* Transaction should already have been freed */
768                 BUG_ON(buffer->transaction);
769
770                 if (buffer->clear_on_free) {
771                         binder_alloc_clear_buf(alloc, buffer);
772                         buffer->clear_on_free = false;
773                 }
774                 binder_free_buf_locked(alloc, buffer);
775                 buffers++;
776         }
777
778         while (!list_empty(&alloc->buffers)) {
779                 buffer = list_first_entry(&alloc->buffers,
780                                           struct binder_buffer, entry);
781                 WARN_ON(!buffer->free);
782
783                 list_del(&buffer->entry);
784                 WARN_ON_ONCE(!list_empty(&alloc->buffers));
785                 kfree(buffer);
786         }
787
788         page_count = 0;
789         if (alloc->pages) {
790                 int i;
791
792                 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
793                         void __user *page_addr;
794                         bool on_lru;
795
796                         if (!alloc->pages[i].page_ptr)
797                                 continue;
798
799                         on_lru = list_lru_del(&binder_alloc_lru,
800                                               &alloc->pages[i].lru);
801                         page_addr = alloc->buffer + i * PAGE_SIZE;
802                         binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
803                                      "%s: %d: page %d at %pK %s\n",
804                                      __func__, alloc->pid, i, page_addr,
805                                      on_lru ? "on lru" : "active");
806                         __free_page(alloc->pages[i].page_ptr);
807                         page_count++;
808                 }
809                 kfree(alloc->pages);
810         }
811         mutex_unlock(&alloc->mutex);
812         if (alloc->vma_vm_mm)
813                 mmdrop(alloc->vma_vm_mm);
814
815         binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
816                      "%s: %d buffers %d, pages %d\n",
817                      __func__, alloc->pid, buffers, page_count);
818 }
819
820 static void print_binder_buffer(struct seq_file *m, const char *prefix,
821                                 struct binder_buffer *buffer)
822 {
823         seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
824                    prefix, buffer->debug_id, buffer->user_data,
825                    buffer->data_size, buffer->offsets_size,
826                    buffer->extra_buffers_size,
827                    buffer->transaction ? "active" : "delivered");
828 }
829
830 /**
831  * binder_alloc_print_allocated() - print buffer info
832  * @m:     seq_file for output via seq_printf()
833  * @alloc: binder_alloc for this proc
834  *
835  * Prints information about every buffer associated with
836  * the binder_alloc state to the given seq_file
837  */
838 void binder_alloc_print_allocated(struct seq_file *m,
839                                   struct binder_alloc *alloc)
840 {
841         struct rb_node *n;
842
843         mutex_lock(&alloc->mutex);
844         for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
845                 print_binder_buffer(m, "  buffer",
846                                     rb_entry(n, struct binder_buffer, rb_node));
847         mutex_unlock(&alloc->mutex);
848 }
849
850 /**
851  * binder_alloc_print_pages() - print page usage
852  * @m:     seq_file for output via seq_printf()
853  * @alloc: binder_alloc for this proc
854  */
855 void binder_alloc_print_pages(struct seq_file *m,
856                               struct binder_alloc *alloc)
857 {
858         struct binder_lru_page *page;
859         int i;
860         int active = 0;
861         int lru = 0;
862         int free = 0;
863
864         mutex_lock(&alloc->mutex);
865         /*
866          * Make sure the binder_alloc is fully initialized, otherwise we might
867          * read inconsistent state.
868          */
869         if (binder_alloc_get_vma(alloc) != NULL) {
870                 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
871                         page = &alloc->pages[i];
872                         if (!page->page_ptr)
873                                 free++;
874                         else if (list_empty(&page->lru))
875                                 active++;
876                         else
877                                 lru++;
878                 }
879         }
880         mutex_unlock(&alloc->mutex);
881         seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
882         seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
883 }
884
885 /**
886  * binder_alloc_get_allocated_count() - return count of buffers
887  * @alloc: binder_alloc for this proc
888  *
889  * Return: count of allocated buffers
890  */
891 int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
892 {
893         struct rb_node *n;
894         int count = 0;
895
896         mutex_lock(&alloc->mutex);
897         for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
898                 count++;
899         mutex_unlock(&alloc->mutex);
900         return count;
901 }
902
903
904 /**
905  * binder_alloc_vma_close() - invalidate address space
906  * @alloc: binder_alloc for this proc
907  *
908  * Called from binder_vma_close() when releasing address space.
909  * Clears alloc->vma to prevent new incoming transactions from
910  * allocating more buffers.
911  */
912 void binder_alloc_vma_close(struct binder_alloc *alloc)
913 {
914         binder_alloc_set_vma(alloc, NULL);
915 }
916
917 /**
918  * binder_alloc_free_page() - shrinker callback to free pages
919  * @item:   item to free
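     * @lru:    list_lru_one that @item is on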
920  * @lock:   lock protecting @lru
921  * @cb_arg: callback argument
922  *
923  * Called from list_lru_walk() in binder_shrink_scan() to free
924  * up pages when the system is under memory pressure.
925  */
926 enum lru_status binder_alloc_free_page(struct list_head *item,
927                                        struct list_lru_one *lru,
928                                        spinlock_t *lock,
929                                        void *cb_arg)
930         __must_hold(lock)
931 {
932         struct mm_struct *mm = NULL;
933         struct binder_lru_page *page = container_of(item,
934                                                     struct binder_lru_page,
935                                                     lru);
936         struct binder_alloc *alloc;
937         uintptr_t page_addr;
938         size_t index;
939         struct vm_area_struct *vma;
940
941         alloc = page->alloc;
942         if (!mutex_trylock(&alloc->mutex))
943                 goto err_get_alloc_mutex_failed;
944
945         if (!page->page_ptr)
946                 goto err_page_already_freed;
947
948         index = page - alloc->pages;
949         page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
950
951         mm = alloc->vma_vm_mm;
952         if (!mmget_not_zero(mm))
953                 goto err_mmget;
954         if (!down_read_trylock(&mm->mmap_sem))
955                 goto err_down_read_mmap_sem_failed;
956         vma = binder_alloc_get_vma(alloc);
957
958         list_lru_isolate(lru, item);
959         spin_unlock(lock);
960
961         if (vma) {
962                 trace_binder_unmap_user_start(alloc, index);
963
964                 zap_page_range(vma, page_addr, PAGE_SIZE);
965
966                 trace_binder_unmap_user_end(alloc, index);
967         }
968         up_read(&mm->mmap_sem);
969         mmput_async(mm);
970
971         trace_binder_unmap_kernel_start(alloc, index);
972
973         __free_page(page->page_ptr);
974         page->page_ptr = NULL;
975
976         trace_binder_unmap_kernel_end(alloc, index);
977
978         spin_lock(lock);
979         mutex_unlock(&alloc->mutex);
980         return LRU_REMOVED_RETRY;
981
982 err_down_read_mmap_sem_failed:
983         mmput_async(mm);
984 err_mmget:
985 err_page_already_freed:
986         mutex_unlock(&alloc->mutex);
987 err_get_alloc_mutex_failed:
988         return LRU_SKIP;
989 }
990
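/*
 * Shrinker callbacks: report how many reclaimable pages are sitting on
 * the binder LRU and walk the LRU with binder_alloc_free_page() to
 * reclaim them when the system is under memory pressure.
 */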
991 static unsigned long
992 binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
993 {
994         unsigned long ret = list_lru_count(&binder_alloc_lru);
995         return ret;
996 }
997
998 static unsigned long
999 binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1000 {
1001         unsigned long ret;
1002
1003         ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
1004                             NULL, sc->nr_to_scan);
1005         return ret;
1006 }
1007
1008 static struct shrinker binder_shrinker = {
1009         .count_objects = binder_shrink_count,
1010         .scan_objects = binder_shrink_scan,
1011         .seeks = DEFAULT_SEEKS,
1012 };
1013
1014 /**
1015  * binder_alloc_init() - called by binder_open() for per-proc initialization
1016  * @alloc: binder_alloc for this proc
1017  *
1018  * Called from binder_open() to initialize binder_alloc fields for
1019  * new binder proc
1020  */
1021 void binder_alloc_init(struct binder_alloc *alloc)
1022 {
1023         alloc->pid = current->group_leader->pid;
1024         mutex_init(&alloc->mutex);
1025         INIT_LIST_HEAD(&alloc->buffers);
1026 }
1027
1028 int binder_alloc_shrinker_init(void)
1029 {
1030         int ret = list_lru_init(&binder_alloc_lru);
1031
1032         if (ret == 0) {
1033                 ret = register_shrinker(&binder_shrinker);
1034                 if (ret)
1035                         list_lru_destroy(&binder_alloc_lru);
1036         }
1037         return ret;
1038 }
1039
1040 /**
1041  * check_buffer() - verify that buffer/offset is safe to access
1042  * @alloc: binder_alloc for this proc
1043  * @buffer: binder buffer to be accessed
1044  * @offset: offset into @buffer data
1045  * @bytes: bytes to access from offset
1046  *
1047  * Check that the @offset/@bytes are within the size of the given
1048  * @buffer and that the buffer is currently active and not freeable.
1049  * Offsets must also be multiples of sizeof(u32). The kernel is
1050  * allowed to touch the buffer in two cases:
1051  *
1052  * 1) when the buffer is being created:
1053  *     (buffer->free == 0 && buffer->allow_user_free == 0)
1054  * 2) when the buffer is being torn down:
1055  *     (buffer->free == 0 && buffer->transaction == NULL).
1056  *
1057  * Return: true if the buffer is safe to access
1058  */
1059 static inline bool check_buffer(struct binder_alloc *alloc,
1060                                 struct binder_buffer *buffer,
1061                                 binder_size_t offset, size_t bytes)
1062 {
1063         size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);
1064
1065         return buffer_size >= bytes &&
1066                 offset <= buffer_size - bytes &&
1067                 IS_ALIGNED(offset, sizeof(u32)) &&
1068                 !buffer->free &&
1069                 (!buffer->allow_user_free || !buffer->transaction);
1070 }
1071
1072 /**
1073  * binder_alloc_get_page() - get kernel pointer for given buffer offset
1074  * @alloc: binder_alloc for this proc
1075  * @buffer: binder buffer to be accessed
1076  * @buffer_offset: offset into @buffer data
1077  * @pgoffp: address to copy final page offset to
1078  *
1079  * Lookup the struct page corresponding to the address
1080  * at @buffer_offset into @buffer->user_data. If @pgoffp is not
1081  * NULL, the byte-offset into the page is written there.
1082  *
1083  * The caller is responsible for ensuring that the offset points
1084  * to a valid address within the @buffer and that @buffer is
1085  * not freeable by the user. Since it can't be freed, we are
1086  * guaranteed that the corresponding elements of @alloc->pages[]
1087  * cannot change.
1088  *
1089  * Return: struct page
1090  */
1091 static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
1092                                           struct binder_buffer *buffer,
1093                                           binder_size_t buffer_offset,
1094                                           pgoff_t *pgoffp)
1095 {
1096         binder_size_t buffer_space_offset = buffer_offset +
1097                 (buffer->user_data - alloc->buffer);
1098         pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
1099         size_t index = buffer_space_offset >> PAGE_SHIFT;
1100         struct binder_lru_page *lru_page;
1101
1102         lru_page = &alloc->pages[index];
1103         *pgoffp = pgoff;
1104         return lru_page->page_ptr;
1105 }
1106
1107 /**
1108  * binder_alloc_clear_buf() - zero out buffer
1109  * @alloc: binder_alloc for this proc
1110  * @buffer: binder buffer to be cleared
1111  *
1112  * memset the given buffer to 0
1113  */
1114 static void binder_alloc_clear_buf(struct binder_alloc *alloc,
1115                                    struct binder_buffer *buffer)
1116 {
1117         size_t bytes = binder_alloc_buffer_size(alloc, buffer);
1118         binder_size_t buffer_offset = 0;
1119
1120         while (bytes) {
1121                 unsigned long size;
1122                 struct page *page;
1123                 pgoff_t pgoff;
1124                 void *kptr;
1125
1126                 page = binder_alloc_get_page(alloc, buffer,
1127                                              buffer_offset, &pgoff);
1128                 size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
1129                 kptr = kmap(page) + pgoff;
1130                 memset(kptr, 0, size);
1131                 kunmap(page);
1132                 bytes -= size;
1133                 buffer_offset += size;
1134         }
1135 }
1136
1137 /**
1138  * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
1139  * @alloc: binder_alloc for this proc
1140  * @buffer: binder buffer to be accessed
1141  * @buffer_offset: offset into @buffer data
1142  * @from: userspace pointer to source buffer
1143  * @bytes: bytes to copy
1144  *
1145  * Copy bytes from source userspace to target buffer.
1146  *
1147  * Return: bytes remaining to be copied
1148  */
1149 unsigned long
1150 binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
1151                                  struct binder_buffer *buffer,
1152                                  binder_size_t buffer_offset,
1153                                  const void __user *from,
1154                                  size_t bytes)
1155 {
1156         if (!check_buffer(alloc, buffer, buffer_offset, bytes))
1157                 return bytes;
1158
1159         while (bytes) {
1160                 unsigned long size;
1161                 unsigned long ret;
1162                 struct page *page;
1163                 pgoff_t pgoff;
1164                 void *kptr;
1165
1166                 page = binder_alloc_get_page(alloc, buffer,
1167                                              buffer_offset, &pgoff);
1168                 size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
1169                 kptr = kmap(page) + pgoff;
1170                 ret = copy_from_user(kptr, from, size);
1171                 kunmap(page);
1172                 if (ret)
1173                         return bytes - size + ret;
1174                 bytes -= size;
1175                 from += size;
1176                 buffer_offset += size;
1177         }
1178         return 0;
1179 }
1180
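/*
 * Copy between a kernel buffer and a binder buffer one page at a time,
 * mapping each backing page with kmap_atomic(). @to_buffer selects the
 * direction of the copy.
 */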
1181 static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
1182                                        bool to_buffer,
1183                                        struct binder_buffer *buffer,
1184                                        binder_size_t buffer_offset,
1185                                        void *ptr,
1186                                        size_t bytes)
1187 {
1188         /* All copies must be 32-bit aligned and a multiple of 32 bits in size */
1189         if (!check_buffer(alloc, buffer, buffer_offset, bytes))
1190                 return -EINVAL;
1191
1192         while (bytes) {
1193                 unsigned long size;
1194                 struct page *page;
1195                 pgoff_t pgoff;
1196                 void *tmpptr;
1197                 void *base_ptr;
1198
1199                 page = binder_alloc_get_page(alloc, buffer,
1200                                              buffer_offset, &pgoff);
1201                 size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
1202                 base_ptr = kmap_atomic(page);
1203                 tmpptr = base_ptr + pgoff;
1204                 if (to_buffer)
1205                         memcpy(tmpptr, ptr, size);
1206                 else
1207                         memcpy(ptr, tmpptr, size);
1208                 /*
1209                  * kunmap_atomic() takes care of flushing the cache
1210                  * if this device has VIVT cache arch
1211                  */
1212                 kunmap_atomic(base_ptr);
1213                 bytes -= size;
1214                 pgoff = 0;
1215                 ptr = ptr + size;
1216                 buffer_offset += size;
1217         }
1218         return 0;
1219 }
1220
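/* Copy @bytes from the kernel buffer @src into @buffer at @buffer_offset. */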
1221 int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
1222                                 struct binder_buffer *buffer,
1223                                 binder_size_t buffer_offset,
1224                                 void *src,
1225                                 size_t bytes)
1226 {
1227         return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
1228                                            src, bytes);
1229 }
1230
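/* Copy @bytes from @buffer at @buffer_offset into the kernel buffer @dest. */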
1231 int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
1232                                   void *dest,
1233                                   struct binder_buffer *buffer,
1234                                   binder_size_t buffer_offset,
1235                                   size_t bytes)
1236 {
1237         return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
1238                                            dest, bytes);
1239 }
1240