/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>
#include <linux/sched/task.h>

#include "ion.h"
static struct ion_device *internal_dev;
static int heap_id;
bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}
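
/*
 * Allocate a buffer from @heap and track it in @dev's buffer rbtree.
 * If the first allocation attempt fails on a heap that defers frees,
 * the deferred free list is drained and the allocation retried once.
 */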
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	int ret;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;

	ret = heap->ops->allocate(heap, buffer, len, flags);
	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, flags);
		if (ret)
			goto err2;
	}

	if (!buffer->sg_table) {
		WARN_ONCE(1, "This heap needs to set the sgtable");
		ret = -EINVAL;
		goto err1;
	}

	table = buffer->sg_table;
	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}
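
/*
 * Release the heap memory backing @buffer. A buffer still mapped in
 * the kernel at this point indicates a missing kmap_put; the mapping
 * is torn down here so the heap pages can be freed safely.
 */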
void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (buffer->kmap_cnt > 0) {
		pr_warn_once("%s: buffer still mapped in the kernel\n",
			     __func__);
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	}
	buffer->heap->ops->free(buffer);
	kfree(buffer);
}
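
/*
 * Remove @buffer from the device rbtree, then either queue it on the
 * heap's deferred free list or destroy it immediately.
 */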
static void _ion_buffer_destroy(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}
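
/*
 * Kernel mapping helpers: kmap_cnt reference-counts map_kernel() so
 * the heap's kernel mapping is created on first use and torn down on
 * the last put. Callers must hold buffer->lock.
 */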
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(!vaddr,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}
static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}
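
/*
 * Duplicate a buffer's sg_table so each dma-buf attachment gets a
 * private copy; dma_address is cleared so every device performs (and
 * tears down) its own DMA mapping without clobbering other attachments.
 */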
static struct sg_table *dup_sg_table(struct sg_table *table)
{
	struct sg_table *new_table;
	int ret, i;
	struct scatterlist *sg, *new_sg;

	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
	if (ret) {
		kfree(new_table);
		return ERR_PTR(-ENOMEM);
	}

	new_sg = new_table->sgl;
	for_each_sg(table->sgl, sg, table->nents, i) {
		memcpy(new_sg, sg, sizeof(*sg));
		new_sg->dma_address = 0;
		new_sg = sg_next(new_sg);
	}

	return new_table;
}
static void free_duped_table(struct sg_table *table)
{
	sg_free_table(table);
	kfree(table);
}
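
/* per-attachment bookkeeping, linked on ion_buffer->attachments */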
struct ion_dma_buf_attachment {
	struct device *dev;
	struct sg_table *table;
	struct list_head list;
};
static int ion_dma_buf_attach(struct dma_buf *dmabuf, struct device *dev,
			      struct dma_buf_attachment *attachment)
{
	struct ion_dma_buf_attachment *a;
	struct sg_table *table;
	struct ion_buffer *buffer = dmabuf->priv;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = dup_sg_table(buffer->sg_table);
	if (IS_ERR(table)) {
		kfree(a);
		return -ENOMEM;
	}

	a->table = table;
	a->dev = dev;
	INIT_LIST_HEAD(&a->list);

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}
static void ion_dma_buf_detatch(struct dma_buf *dmabuf,
				struct dma_buf_attachment *attachment)
{
	struct ion_dma_buf_attachment *a = attachment->priv;
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);
	free_duped_table(a->table);

	kfree(a);
}
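
/*
 * Map the attachment's private sg_table into the attaching device's
 * DMA address space. Returns ERR_PTR(-ENOMEM) if dma_map_sg() maps
 * zero entries.
 */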
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct ion_dma_buf_attachment *a = attachment->priv;
	struct sg_table *table;

	table = a->table;

	if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
			direction))
		return ERR_PTR(-ENOMEM);

	return table;
}
static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
	dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
}
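
/*
 * mmap the buffer into userspace via the heap's map_user op. Buffers
 * allocated without ION_FLAG_CACHED are mapped write-combined.
 */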
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}
static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	_ion_buffer_destroy(buffer);
}
static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}
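
/*
 * CPU access bracket: begin takes a kernel mapping (when the heap
 * supports one) and syncs every attachment for the CPU; end drops the
 * mapping and syncs every attachment back to the device.
 */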
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;
	struct ion_dma_buf_attachment *a;

	/*
	 * TODO: Move this elsewhere because we don't always need a vaddr
	 */
	if (buffer->heap->ops->map_kernel) {
		mutex_lock(&buffer->lock);
		vaddr = ion_buffer_kmap_get(buffer);
		mutex_unlock(&buffer->lock);
	}

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
				    direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}
static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_dma_buf_attachment *a;

	if (buffer->heap->ops->map_kernel) {
		mutex_lock(&buffer->lock);
		ion_buffer_kmap_put(buffer);
		mutex_unlock(&buffer->lock);
	}

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
				       direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}
static const struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.attach = ion_dma_buf_attach,
	.detach = ion_dma_buf_detatch,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.map_atomic = ion_dma_buf_kmap,
	.unmap_atomic = ion_dma_buf_kunmap,
	.map = ion_dma_buf_kmap,
	.unmap = ion_dma_buf_kunmap,
};
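
/*
 * ion_alloc() - allocate from the first matching heap and export the
 * result as a dma-buf; the return value is a dma-buf fd or a negative
 * errno. A rough sketch of the userspace ioctl path that lands here,
 * assuming the staging uapi field names (ion_fd open on /dev/ion):
 *
 *	struct ion_allocation_data data = {
 *		.len = 4096,
 *		.heap_id_mask = 1 << heap_id,
 *		.flags = ION_FLAG_CACHED,
 *	};
 *	if (ioctl(ion_fd, ION_IOC_ALLOC, &data) == 0)
 *		;	// data.fd now holds the exported dma-buf fd
 */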
int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
{
	struct ion_device *dev = internal_dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	int fd;
	struct dma_buf *dmabuf;

	pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
		 len, heap_id_mask, flags);
	/*
	 * Traverse the list of heaps available in this system in priority
	 * order. If the heap type is supported by the client and matches
	 * the request of the caller, allocate from it. Repeat until the
	 * allocation has succeeded or all heaps have been tried.
	 */
	len = PAGE_ALIGN(len);
	if (!len)
		return -EINVAL;

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (!buffer)
		return -ENODEV;

	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		_ion_buffer_destroy(buffer);
		return PTR_ERR(dmabuf);
	}

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
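
/*
 * ion_query_heaps() - report the available heaps to userspace. Called
 * with a NULL heap buffer it only fills in query->cnt, so callers can
 * size their array and call again to receive the per-heap data.
 */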
int ion_query_heaps(struct ion_heap_query *query)
{
	struct ion_device *dev = internal_dev;
	struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
	int ret = -EINVAL, cnt = 0, max_cnt;
	struct ion_heap *heap;
	struct ion_heap_data hdata;

	memset(&hdata, 0, sizeof(hdata));

	down_read(&dev->lock);
	if (!buffer) {
		query->cnt = dev->heap_cnt;
		ret = 0;
		goto out;
	}

	if (query->cnt <= 0)
		goto out;

	max_cnt = query->cnt;

	plist_for_each_entry(heap, &dev->heaps, node) {
		strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
		hdata.name[sizeof(hdata.name) - 1] = '\0';
		hdata.type = heap->type;
		hdata.heap_id = heap->id;

		if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
			ret = -EFAULT;
			goto out;
		}

		cnt++;
		if (cnt >= max_cnt)
			break;
	}

	query->cnt = cnt;
	ret = 0;
out:
	up_read(&dev->lock);
	return ret;
}
static const struct file_operations ion_fops = {
	.owner          = THIS_MODULE,
	.unlocked_ioctl = ion_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ion_ioctl,
#endif
};
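
/*
 * debugfs "<heap>_shrink" file: writing a count scans that many objects
 * out of the heap shrinker (0 means everything currently counted);
 * reading reports the shrinker's current object count.
 */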
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = val;

	if (!val) {
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	}

	heap->shrinker.scan_objects(&heap->shrinker, &sc);
	return 0;
}
static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
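
/*
 * Register @heap with the global ion device: set up deferred freeing
 * and the shrinker if requested, assign the next heap id, and expose a
 * debugfs shrink control when the shrinker is wired up.
 */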
void ion_device_add_heap(struct ion_heap *heap)
{
	struct dentry *debug_file;
	struct ion_device *dev = internal_dev;

	if (!heap->ops->allocate || !heap->ops->free)
		pr_err("%s: cannot add heap with invalid ops struct.\n",
		       __func__);

	spin_lock_init(&heap->free_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
		ion_heap_init_shrinker(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	heap->id = heap_id++;
	/*
	 * use a negative heap->id to reverse the priority: when traversing
	 * the list later, higher id numbers are attempted first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);

	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debug_file = debugfs_create_file(
			debug_name, 0644, dev->debug_root, heap,
			&debug_shrink_fops);
		if (!debug_file) {
			char buf[256], *path;

			path = dentry_path(dev->debug_root, buf, 256);
			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
			       path, debug_name);
		}
	}

	dev->heap_cnt++;
	up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);
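
/*
 * One-time device setup at boot: register the /dev/ion misc device,
 * create the debugfs root, and initialize the buffer rbtree, locks and
 * heap plist before any heap registers.
 */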
static int ion_device_create(void)
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
	if (!idev)
		return -ENOMEM;

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ret;
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root)
		pr_err("ion: failed to create debugfs root directory.\n");

	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	internal_dev = idev;
	return 0;
}
subsys_initcall(ion_device_create);