GNU Linux-libre 4.19.245-gnu1
drivers/staging/android/ion/ion.c
// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 */

#include <linux/anon_inodes.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched/task.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include "ion.h"

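/*
 * A single, global ION device instance; heaps registered through
 * ion_device_add_heap() take their ids from heap_id.
 */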
static struct ion_device *internal_dev;
static int heap_id;

/* this function should only be called while dev->buffer_lock is held */
static void ion_buffer_add(struct ion_device *dev,
                           struct ion_buffer *buffer)
{
        struct rb_node **p = &dev->buffers.rb_node;
        struct rb_node *parent = NULL;
        struct ion_buffer *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_buffer, node);

                if (buffer < entry) {
                        p = &(*p)->rb_left;
                } else if (buffer > entry) {
                        p = &(*p)->rb_right;
                } else {
                        pr_err("%s: buffer already found.\n", __func__);
                        BUG();
                }
        }

        rb_link_node(&buffer->node, parent, p);
        rb_insert_color(&buffer->node, &dev->buffers);
}

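/*
 * Allocate backing memory for a buffer from @heap and add it to the device
 * rbtree.  For heaps that defer frees, a failed allocation is retried once
 * after draining the heap's freelist.
 */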
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
                                            struct ion_device *dev,
                                            unsigned long len,
                                            unsigned long flags)
{
        struct ion_buffer *buffer;
        int ret;

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer)
                return ERR_PTR(-ENOMEM);

        buffer->heap = heap;
        buffer->flags = flags;
        buffer->dev = dev;
        buffer->size = len;

        ret = heap->ops->allocate(heap, buffer, len, flags);

        if (ret) {
                if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
                        goto err2;

                ion_heap_freelist_drain(heap, 0);
                ret = heap->ops->allocate(heap, buffer, len, flags);
                if (ret)
                        goto err2;
        }

        if (!buffer->sg_table) {
                WARN_ONCE(1, "This heap needs to set the sgtable");
                ret = -EINVAL;
                goto err1;
        }

        INIT_LIST_HEAD(&buffer->attachments);
        mutex_init(&buffer->lock);
        mutex_lock(&dev->buffer_lock);
        ion_buffer_add(dev, buffer);
        mutex_unlock(&dev->buffer_lock);
        return buffer;

err1:
        heap->ops->free(buffer);
err2:
        kfree(buffer);
        return ERR_PTR(ret);
}

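/*
 * Release the heap memory backing @buffer, dropping any kernel mapping that
 * is still held, then free the bookkeeping structure itself.
 */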
void ion_buffer_destroy(struct ion_buffer *buffer)
{
        if (buffer->kmap_cnt > 0) {
                pr_warn_once("%s: buffer still mapped in the kernel\n",
                             __func__);
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
        }
        buffer->heap->ops->free(buffer);
        kfree(buffer);
}

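/*
 * Unlink the buffer from the device rbtree, then either queue it on the
 * heap's deferred-free list or destroy it right away.
 */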
static void _ion_buffer_destroy(struct ion_buffer *buffer)
{
        struct ion_heap *heap = buffer->heap;
        struct ion_device *dev = buffer->dev;

        mutex_lock(&dev->buffer_lock);
        rb_erase(&buffer->node, &dev->buffers);
        mutex_unlock(&dev->buffer_lock);

        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                ion_heap_freelist_add(heap, buffer);
        else
                ion_buffer_destroy(buffer);
}

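/*
 * Kernel mappings are refcounted via buffer->kmap_cnt: the first user calls
 * the heap's map_kernel() op, the last user unmaps the buffer again.
 */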
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
        void *vaddr;

        if (buffer->kmap_cnt) {
                if (buffer->kmap_cnt == INT_MAX)
                        return ERR_PTR(-EOVERFLOW);

                buffer->kmap_cnt++;
                return buffer->vaddr;
        }
        vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
        if (WARN_ONCE(!vaddr,
                      "heap->ops->map_kernel should return ERR_PTR on error"))
                return ERR_PTR(-EINVAL);
        if (IS_ERR(vaddr))
                return vaddr;
        buffer->vaddr = vaddr;
        buffer->kmap_cnt++;
        return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
        buffer->kmap_cnt--;
        if (!buffer->kmap_cnt) {
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
                buffer->vaddr = NULL;
        }
}

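/*
 * Each dma-buf attachment gets its own copy of the buffer's sg_table so
 * that every attached device can store its own DMA addresses without
 * disturbing the other attachments.
 */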
static struct sg_table *dup_sg_table(struct sg_table *table)
{
        struct sg_table *new_table;
        int ret, i;
        struct scatterlist *sg, *new_sg;

        new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
        if (!new_table)
                return ERR_PTR(-ENOMEM);

        ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
        if (ret) {
                kfree(new_table);
                return ERR_PTR(-ENOMEM);
        }

        new_sg = new_table->sgl;
        for_each_sg(table->sgl, sg, table->nents, i) {
                memcpy(new_sg, sg, sizeof(*sg));
                new_sg->dma_address = 0;
                new_sg = sg_next(new_sg);
        }

        return new_table;
}

static void free_duped_table(struct sg_table *table)
{
        sg_free_table(table);
        kfree(table);
}

struct ion_dma_buf_attachment {
        struct device *dev;
        struct sg_table *table;
        struct list_head list;
};

static int ion_dma_buf_attach(struct dma_buf *dmabuf,
                              struct dma_buf_attachment *attachment)
{
        struct ion_dma_buf_attachment *a;
        struct sg_table *table;
        struct ion_buffer *buffer = dmabuf->priv;

        a = kzalloc(sizeof(*a), GFP_KERNEL);
        if (!a)
                return -ENOMEM;

        table = dup_sg_table(buffer->sg_table);
        if (IS_ERR(table)) {
                kfree(a);
                return -ENOMEM;
        }

        a->table = table;
        a->dev = attachment->dev;
        INIT_LIST_HEAD(&a->list);

        attachment->priv = a;

        mutex_lock(&buffer->lock);
        list_add(&a->list, &buffer->attachments);
        mutex_unlock(&buffer->lock);

        return 0;
}

static void ion_dma_buf_detatch(struct dma_buf *dmabuf,
                                struct dma_buf_attachment *attachment)
{
        struct ion_dma_buf_attachment *a = attachment->priv;
        struct ion_buffer *buffer = dmabuf->priv;

        mutex_lock(&buffer->lock);
        list_del(&a->list);
        mutex_unlock(&buffer->lock);
        free_duped_table(a->table);

        kfree(a);
}

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
                                        enum dma_data_direction direction)
{
        struct ion_dma_buf_attachment *a = attachment->priv;
        struct sg_table *table;

        table = a->table;

        if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
                        direction))
                return ERR_PTR(-ENOMEM);

        return table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
                              struct sg_table *table,
                              enum dma_data_direction direction)
{
        dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
}

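/*
 * mmap() of the exported dma-buf: buffers allocated without ION_FLAG_CACHED
 * are mapped write-combined, then the heap's map_user() op installs the
 * pages into the VMA.
 */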
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        struct ion_buffer *buffer = dmabuf->priv;
        int ret = 0;

        if (!buffer->heap->ops->map_user) {
                pr_err("%s: this heap does not define a method for mapping to userspace\n",
                       __func__);
                return -EINVAL;
        }

        if (!(buffer->flags & ION_FLAG_CACHED))
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

        mutex_lock(&buffer->lock);
        /* now map it to userspace */
        ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
        mutex_unlock(&buffer->lock);

        if (ret)
                pr_err("%s: failure mapping buffer to userspace\n",
                       __func__);

        return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
        struct ion_buffer *buffer = dmabuf->priv;

        _ion_buffer_destroy(buffer);
}

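/*
 * The dma-buf kmap callbacks rely on the kernel mapping set up via
 * ion_dma_buf_begin_cpu_access(); @offset is a page index into that mapping.
 */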
static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
        struct ion_buffer *buffer = dmabuf->priv;

        return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
                               void *ptr)
{
}

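/*
 * CPU access bracketing: take a refcounted kernel mapping when the heap
 * provides map_kernel(), and sync every attachment's sg list for the CPU
 * (begin) or back to the device (end).
 */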
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                                        enum dma_data_direction direction)
{
        struct ion_buffer *buffer = dmabuf->priv;
        void *vaddr;
        struct ion_dma_buf_attachment *a;
        int ret = 0;

        /*
         * TODO: Move this elsewhere because we don't always need a vaddr
         */
        if (buffer->heap->ops->map_kernel) {
                mutex_lock(&buffer->lock);
                vaddr = ion_buffer_kmap_get(buffer);
                if (IS_ERR(vaddr)) {
                        ret = PTR_ERR(vaddr);
                        goto unlock;
                }
                mutex_unlock(&buffer->lock);
        }

        mutex_lock(&buffer->lock);
        list_for_each_entry(a, &buffer->attachments, list) {
                dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
                                    direction);
        }

unlock:
        mutex_unlock(&buffer->lock);
        return ret;
}

static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
                                      enum dma_data_direction direction)
{
        struct ion_buffer *buffer = dmabuf->priv;
        struct ion_dma_buf_attachment *a;

        if (buffer->heap->ops->map_kernel) {
                mutex_lock(&buffer->lock);
                ion_buffer_kmap_put(buffer);
                mutex_unlock(&buffer->lock);
        }

        mutex_lock(&buffer->lock);
        list_for_each_entry(a, &buffer->attachments, list) {
                dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
                                       direction);
        }
        mutex_unlock(&buffer->lock);

        return 0;
}

static const struct dma_buf_ops dma_buf_ops = {
        .map_dma_buf = ion_map_dma_buf,
        .unmap_dma_buf = ion_unmap_dma_buf,
        .mmap = ion_mmap,
        .release = ion_dma_buf_release,
        .attach = ion_dma_buf_attach,
        .detach = ion_dma_buf_detatch,
        .begin_cpu_access = ion_dma_buf_begin_cpu_access,
        .end_cpu_access = ion_dma_buf_end_cpu_access,
        .map = ion_dma_buf_kmap,
        .unmap = ion_dma_buf_kunmap,
};

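/*
 * Allocate a buffer from the highest-priority heap matching @heap_id_mask
 * and export it as a dma-buf.  Returns the new dma-buf fd, or a negative
 * errno.  Reached from userspace via ion_ioctl().
 */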
int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
{
        struct ion_device *dev = internal_dev;
        struct ion_buffer *buffer = NULL;
        struct ion_heap *heap;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        int fd;
        struct dma_buf *dmabuf;

        pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
                 len, heap_id_mask, flags);
        /*
         * Traverse the list of heaps available in this system in priority
         * order.  If the heap matches the caller's heap_id_mask, try to
         * allocate from it.  Repeat until an allocation succeeds or all
         * heaps have been tried.
         */
        len = PAGE_ALIGN(len);

        if (!len)
                return -EINVAL;

        down_read(&dev->lock);
        plist_for_each_entry(heap, &dev->heaps, node) {
                /* if the caller didn't specify this heap id */
                if (!((1 << heap->id) & heap_id_mask))
                        continue;
                buffer = ion_buffer_create(heap, dev, len, flags);
                if (!IS_ERR(buffer))
                        break;
        }
        up_read(&dev->lock);

        if (!buffer)
                return -ENODEV;

        if (IS_ERR(buffer))
                return PTR_ERR(buffer);

        exp_info.ops = &dma_buf_ops;
        exp_info.size = buffer->size;
        exp_info.flags = O_RDWR;
        exp_info.priv = buffer;

        dmabuf = dma_buf_export(&exp_info);
        if (IS_ERR(dmabuf)) {
                _ion_buffer_destroy(buffer);
                return PTR_ERR(dmabuf);
        }

        fd = dma_buf_fd(dmabuf, O_CLOEXEC);
        if (fd < 0)
                dma_buf_put(dmabuf);

        return fd;
}

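/*
 * Report the heaps registered with the device.  With a NULL destination
 * buffer only the heap count is returned; otherwise up to query->cnt
 * entries are copied to userspace.
 */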
int ion_query_heaps(struct ion_heap_query *query)
{
        struct ion_device *dev = internal_dev;
        struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
        int ret = -EINVAL, cnt = 0, max_cnt;
        struct ion_heap *heap;
        struct ion_heap_data hdata;

        memset(&hdata, 0, sizeof(hdata));

        down_read(&dev->lock);
        if (!buffer) {
                query->cnt = dev->heap_cnt;
                ret = 0;
                goto out;
        }

        if (query->cnt <= 0)
                goto out;

        max_cnt = query->cnt;

        plist_for_each_entry(heap, &dev->heaps, node) {
                strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
                hdata.name[sizeof(hdata.name) - 1] = '\0';
                hdata.type = heap->type;
                hdata.heap_id = heap->id;

                if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
                        ret = -EFAULT;
                        goto out;
                }

                cnt++;
                if (cnt >= max_cnt)
                        break;
        }

        query->cnt = cnt;
        ret = 0;
out:
        up_read(&dev->lock);
        return ret;
}

static const struct file_operations ion_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = ion_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ion_ioctl,
#endif
};

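/*
 * Backing for the per-heap debugfs "<name>_shrink" file: writing N scans N
 * objects through the heap shrinker (0 drains everything currently counted),
 * reading reports the current object count.
 */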
static int debug_shrink_set(void *data, u64 val)
{
        struct ion_heap *heap = data;
        struct shrink_control sc;
        int objs;

        sc.gfp_mask = GFP_HIGHUSER;
        sc.nr_to_scan = val;

        if (!val) {
                objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
                sc.nr_to_scan = objs;
        }

        heap->shrinker.scan_objects(&heap->shrinker, &sc);
        return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
        struct ion_heap *heap = data;
        struct shrink_control sc;
        int objs;

        sc.gfp_mask = GFP_HIGHUSER;
        sc.nr_to_scan = 0;

        objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
        *val = objs;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
                        debug_shrink_set, "%llu\n");

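/*
 * Register @heap with the global ION device: set up deferred freeing and a
 * shrinker where requested, assign the next heap id and insert the heap
 * into the priority list (higher ids are tried first by ion_alloc()).
 */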
void ion_device_add_heap(struct ion_heap *heap)
{
        struct ion_device *dev = internal_dev;
        int ret;

        if (!heap->ops->allocate || !heap->ops->free)
                pr_err("%s: can not add heap with invalid ops struct.\n",
                       __func__);

        spin_lock_init(&heap->free_lock);
        heap->free_list_size = 0;

        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                ion_heap_init_deferred_free(heap);

        if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) {
                ret = ion_heap_init_shrinker(heap);
                if (ret)
                        pr_err("%s: Failed to register shrinker\n", __func__);
        }

        heap->dev = dev;
        down_write(&dev->lock);
        heap->id = heap_id++;
        /*
         * use negative heap->id to reverse the priority -- when traversing
         * the list later attempt higher id numbers first
         */
        plist_node_init(&heap->node, -heap->id);
        plist_add(&heap->node, &dev->heaps);

        if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
                char debug_name[64];

                snprintf(debug_name, 64, "%s_shrink", heap->name);
                debugfs_create_file(debug_name, 0644, dev->debug_root,
                                    heap, &debug_shrink_fops);
        }

        dev->heap_cnt++;
        up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);

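/* Boot-time setup: register the /dev/ion misc device and the global state. */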
static int ion_device_create(void)
{
        struct ion_device *idev;
        int ret;

        idev = kzalloc(sizeof(*idev), GFP_KERNEL);
        if (!idev)
                return -ENOMEM;

        idev->dev.minor = MISC_DYNAMIC_MINOR;
        idev->dev.name = "ion";
        idev->dev.fops = &ion_fops;
        idev->dev.parent = NULL;
        ret = misc_register(&idev->dev);
        if (ret) {
                pr_err("ion: failed to register misc device.\n");
                kfree(idev);
                return ret;
        }

        idev->debug_root = debugfs_create_dir("ion", NULL);
        idev->buffers = RB_ROOT;
        mutex_init(&idev->buffer_lock);
        init_rwsem(&idev->lock);
        plist_head_init(&idev->heaps);
        internal_dev = idev;
        return 0;
}
subsys_initcall(ion_device_create);