// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 */
#include <linux/anon_inodes.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched/task.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include "ion.h"
static struct ion_device *internal_dev;
static int heap_id;
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}
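/*
 * Every live buffer is tracked in dev->buffers, an rb-tree ordered by the
 * buffer's address: ion_buffer_create() below inserts under dev->buffer_lock
 * and _ion_buffer_destroy() erases the node again.
 */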
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	int ret;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	buffer->dev = dev;
	buffer->size = len;

	ret = heap->ops->allocate(heap, buffer, len, flags);
	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		/* drain the deferred free list and retry once */
		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, flags);
		if (ret)
			goto err2;
	}

	if (!buffer->sg_table) {
		WARN_ONCE(1, "This heap needs to set the sgtable");
		ret = -EINVAL;
		goto err1;
	}

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}
void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (buffer->kmap_cnt > 0) {
		pr_warn_once("%s: buffer still mapped in the kernel\n",
			     __func__);
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	}
	buffer->heap->ops->free(buffer);
	kfree(buffer);
}
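/*
 * ion_buffer_destroy() releases the backing memory immediately; the wrapper
 * below additionally unlinks the buffer from the device rb-tree and, for
 * heaps with ION_HEAP_FLAG_DEFER_FREE, hands the buffer to the heap's
 * freelist instead of freeing it inline.
 */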
static void _ion_buffer_destroy(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		if (buffer->kmap_cnt == INT_MAX)
			return ERR_PTR(-EOVERFLOW);
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(!vaddr,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}
static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}
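/*
 * Each dma-buf attachment gets its own copy of the heap's sg_table so the
 * per-device dma_address values written by dma_map_sg() never clash between
 * importers that share the same buffer.
 */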
static struct sg_table *dup_sg_table(struct sg_table *table)
{
	struct sg_table *new_table;
	int ret, i;
	struct scatterlist *sg, *new_sg;

	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
	if (ret) {
		kfree(new_table);
		return ERR_PTR(-ENOMEM);
	}

	new_sg = new_table->sgl;
	for_each_sg(table->sgl, sg, table->nents, i) {
		memcpy(new_sg, sg, sizeof(*sg));
		new_sg->dma_address = 0;
		new_sg = sg_next(new_sg);
	}

	return new_table;
}

static void free_duped_table(struct sg_table *table)
{
	sg_free_table(table);
	kfree(table);
}
struct ion_dma_buf_attachment {
	struct device *dev;
	struct sg_table *table;
	struct list_head list;
};
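/*
 * Attachments are linked on buffer->attachments so begin/end_cpu_access
 * below can walk every attached device and do the matching cache
 * maintenance with dma_sync_sg_for_cpu()/dma_sync_sg_for_device().
 */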
static int ion_dma_buf_attach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attachment)
{
	struct ion_dma_buf_attachment *a;
	struct sg_table *table;
	struct ion_buffer *buffer = dmabuf->priv;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = dup_sg_table(buffer->sg_table);
	if (IS_ERR(table)) {
		kfree(a);
		return -ENOMEM;
	}

	a->table = table;
	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}
static void ion_dma_buf_detatch(struct dma_buf *dmabuf,
				struct dma_buf_attachment *attachment)
{
	struct ion_dma_buf_attachment *a = attachment->priv;
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);
	free_duped_table(a->table);

	kfree(a);
}
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct ion_dma_buf_attachment *a = attachment->priv;
	struct sg_table *table = a->table;

	if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
			direction))
		return ERR_PTR(-ENOMEM);

	return table;
}
static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
	dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
}
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}
static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	_ion_buffer_destroy(buffer);
}
static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}
static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;
	struct ion_dma_buf_attachment *a;
	int ret = 0;

	/*
	 * TODO: Move this elsewhere because we don't always need a vaddr
	 */
	if (buffer->heap->ops->map_kernel) {
		mutex_lock(&buffer->lock);
		vaddr = ion_buffer_kmap_get(buffer);
		if (IS_ERR(vaddr)) {
			ret = PTR_ERR(vaddr);
			goto unlock;
		}
		mutex_unlock(&buffer->lock);
	}

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
				    direction);
	}

unlock:
	mutex_unlock(&buffer->lock);
	return ret;
}
static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_dma_buf_attachment *a;

	if (buffer->heap->ops->map_kernel) {
		mutex_lock(&buffer->lock);
		ion_buffer_kmap_put(buffer);
		mutex_unlock(&buffer->lock);
	}

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
				       direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}
static const struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.attach = ion_dma_buf_attach,
	.detach = ion_dma_buf_detatch,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.map = ion_dma_buf_kmap,
	.unmap = ion_dma_buf_kunmap,
};
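/*
 * Illustrative sketch only (not part of this file): an importing kernel
 * driver sees a plain dma-buf and reaches the ops above through the generic
 * dma-buf API, roughly:
 *
 *	buf = dma_buf_get(fd);
 *	att = dma_buf_attach(buf, dev);			/- ion_dma_buf_attach()
 *	sgt = dma_buf_map_attachment(att, DMA_BIDIRECTIONAL);
 *							/- ion_map_dma_buf()
 */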
int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
{
	struct ion_device *dev = internal_dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	int fd;
	struct dma_buf *dmabuf;

	pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
		 len, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order. If the heap type is supported by the client and matches the
	 * request of the caller, allocate from it. Repeat until the
	 * allocation has succeeded or all heaps have been tried.
	 */
	len = PAGE_ALIGN(len);
	if (!len)
		return -EINVAL;

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (!buffer)
		return -ENODEV;
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		_ion_buffer_destroy(buffer);
		return PTR_ERR(dmabuf);
	}

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
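/*
 * Illustrative sketch only (assumed userspace usage): ion_alloc() is reached
 * from the ION_IOC_ALLOC ioctl handled in ion-ioctl.c, and the value it
 * returns is a dma-buf fd that callers typically mmap() directly:
 *
 *	struct ion_allocation_data data = {
 *		.len = 4096,
 *		.heap_id_mask = 1 << heap_id,
 *		.flags = ION_FLAG_CACHED,
 *	};
 *	ioctl(ion_fd, ION_IOC_ALLOC, &data);
 *	ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   data.fd, 0);
 */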
int ion_query_heaps(struct ion_heap_query *query)
{
	struct ion_device *dev = internal_dev;
	struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
	int ret = -EINVAL, cnt = 0, max_cnt;
	struct ion_heap *heap;
	struct ion_heap_data hdata;

	memset(&hdata, 0, sizeof(hdata));

	down_read(&dev->lock);
	if (!buffer) {
		query->cnt = dev->heap_cnt;
		ret = 0;
		goto out;
	}

	if (query->cnt <= 0)
		goto out;

	max_cnt = query->cnt;
	plist_for_each_entry(heap, &dev->heaps, node) {
		strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
		hdata.name[sizeof(hdata.name) - 1] = '\0';
		hdata.type = heap->type;
		hdata.heap_id = heap->id;

		if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
			ret = -EFAULT;
			goto out;
		}

		cnt++;
		if (cnt >= max_cnt)
			break;
	}

	query->cnt = cnt;
	ret = 0;
out:
	up_read(&dev->lock);
	return ret;
}
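/*
 * Illustrative sketch only (assumed userspace usage): because a NULL heaps
 * pointer just reports the heap count, ION_IOC_HEAP_QUERY is normally
 * issued twice:
 *
 *	struct ion_heap_query query = { 0 };
 *	ioctl(ion_fd, ION_IOC_HEAP_QUERY, &query);	/- query.cnt = nr heaps
 *	query.heaps = (__u64)(uintptr_t)calloc(query.cnt,
 *					sizeof(struct ion_heap_data));
 *	ioctl(ion_fd, ION_IOC_HEAP_QUERY, &query);	/- fills the array
 */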
static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = ion_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = ion_ioctl,
#endif
};
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = val;

	if (!val) {
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	}

	heap->shrinker.scan_objects(&heap->shrinker, &sc);
	return 0;
}
static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
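/*
 * The two helpers above back the "<heap>_shrink" debugfs file created in
 * ion_device_add_heap(): reading it reports how many objects the heap's
 * shrinker could free right now, and writing a count (0 meaning "everything
 * currently counted") forces that many objects to be scanned, e.g.
 * "echo 0 > /sys/kernel/debug/ion/system_shrink" (heap name assumed).
 */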
void ion_device_add_heap(struct ion_heap *heap)
{
	struct ion_device *dev = internal_dev;
	int ret;

	if (!heap->ops->allocate || !heap->ops->free)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	spin_lock_init(&heap->free_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) {
		ret = ion_heap_init_shrinker(heap);
		if (ret)
			pr_err("%s: Failed to register shrinker\n", __func__);
	}

	heap->dev = dev;
	down_write(&dev->lock);
	heap->id = heap_id++;
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later, attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);

	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debugfs_create_file(debug_name, 0644, dev->debug_root,
				    heap, &debug_shrink_fops);
	}

	dev->heap_cnt++;
	up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);
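/*
 * Device creation runs at subsys_initcall time, before the heap drivers'
 * own initcalls, so internal_dev is already set up by the time they call
 * the exported ion_device_add_heap() above.
 */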
static int ion_device_create(void)
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
	if (!idev)
		return -ENOMEM;

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ret;
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	internal_dev = idev;
	return 0;
}
subsys_initcall(ion_device_create);