/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return (buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}
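/*
 * For buffers that fault in their user mappings, the low bit of each
 * struct page pointer stored in buffer->pages doubles as a dirty flag:
 * the helpers below mask it off to recover the real pointer, or set and
 * clear it to track which pages need a cache sync before the next DMA.
 */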
static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}
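/*
 * Allocates a buffer from the given heap.  If the first attempt fails and
 * the heap frees asynchronously, the deferred-free list is drained and the
 * allocation retried once before giving up.
 */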
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	if (buffer->sg_table == NULL) {
		WARN_ONCE(1, "This heap needs to set the sgtable");
		ret = -EINVAL;
		goto err1;
	}

	table = buffer->sg_table;
	buffer->dev = dev;
	buffer->size = len;

	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err1;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}
	}

	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * this will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here.  However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg.  The implicit contract here is that
	 * memory coming from the heaps is ready for dma, ie if it has a
	 * cached mapping that mapping has been invalidated
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
	}
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}
void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (buffer->kmap_cnt > 0) {
		pr_warn_once("%s: buffer still mapped in the kernel\n",
			     __func__);
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	}
	buffer->heap->ops->free(buffer);
	vfree(buffer->pages);
	kfree(buffer);
}
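/*
 * kref release callback: the buffer is unlinked from the device rbtree and
 * then either queued on the heap's deferred-free list or destroyed
 * immediately, depending on ION_HEAP_FLAG_DEFER_FREE.
 */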
static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}
static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}
static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}
static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}
static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}
static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}
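/*
 * Refuse to take another reference if doing so would wrap the handle's
 * kref back to zero; a saturated refcount could otherwise be released
 * prematurely while the handle is still in use.
 */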
/* Must hold the client lock */
static struct ion_handle *ion_handle_get_check_overflow(
					struct ion_handle *handle)
{
	if (atomic_read(&handle->ref.refcount) + 1 == 0)
		return ERR_PTR(-EOVERFLOW);
	ion_handle_get(handle);
	return handle;
}
int ion_handle_put_nolock(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = ion_handle_put_nolock(handle);
	mutex_unlock(&client->lock);

	return ret;
}
static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}
struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
					       int id)
{
	struct ion_handle *handle;

	handle = idr_find(&client->idr, id);
	if (handle)
		return ion_handle_get_check_overflow(handle);

	return ERR_PTR(-EINVAL);
}
bool ion_handle_validate(struct ion_client *client,
			 struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return idr_find(&client->idr, handle->id) == handle;
}
static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}
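/*
 * Core allocation path shared by the in-kernel API and the ioctl layer.
 * grab_handle is assumed to mean "return with an extra handle reference
 * taken under the client lock", so a caller that publishes the handle id
 * to userspace can do so before dropping its own reference.
 */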
struct ion_handle *__ion_alloc(struct ion_client *client, size_t len,
			       size_t align, unsigned int heap_id_mask,
			       unsigned int flags, bool grab_handle)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	if (grab_handle)
		ion_handle_get(handle);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	return __ion_alloc(client, len, align, heap_id_mask, flags, false);
}
EXPORT_SYMBOL(ion_alloc);
void ion_free_nolock(struct ion_client *client,
		     struct ion_handle *handle)
{
	if (!ion_handle_validate(client, handle)) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put_nolock(handle);
}

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	ion_free_nolock(client, handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);
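/*
 * Kernel mappings are reference counted at two levels: each handle tracks
 * how many times its owner asked for a mapping, and the buffer tracks the
 * total across all handles.  The heap's map_kernel/unmap_kernel ops are
 * only called on the buffer count's 0 -> 1 and 1 -> 0 transitions.
 */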
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		if (buffer->kmap_cnt == INT_MAX)
			return ERR_PTR(-EOVERFLOW);

		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}
static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		if (handle->kmap_cnt == INT_MAX)
			return ERR_PTR(-EOVERFLOW);

		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}
static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}
static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	if (!handle->kmap_cnt) {
		WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
		return;
	}
	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
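/*
 * A client's debugfs file can be opened after the client itself has gone
 * away, so the show path first checks, under debugfs_mutex, that the
 * client is still present in the device's client rbtree before
 * dereferencing it.
 */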
static struct mutex debugfs_mutex;
static struct rb_root *ion_root_client;
static int is_client_alive(struct ion_client *client)
{
	struct rb_node *node;
	struct ion_client *tmp;
	struct ion_device *dev;

	node = ion_root_client->rb_node;
	dev = container_of(ion_root_client, struct ion_device, clients);

	down_read(&dev->lock);
	while (node) {
		tmp = rb_entry(node, struct ion_client, node);
		if (client < tmp) {
			node = node->rb_left;
		} else if (client > tmp) {
			node = node->rb_right;
		} else {
			up_read(&dev->lock);
			return 1;
		}
	}

	up_read(&dev->lock);
	return 0;
}
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	int i;

	mutex_lock(&debugfs_mutex);
	if (!is_client_alive(client)) {
		seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n",
			   client);
		mutex_unlock(&debugfs_mutex);
		return 0;
	}

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	mutex_unlock(&debugfs_mutex);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}
static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
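/*
 * Several clients may register under the same name (e.g. one per process
 * with the same comm), so each client's debugfs entry gets a display
 * serial appended: one more than the highest serial already in use for
 * that name.
 */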
static int ion_get_client_serial(const struct rb_root *root,
				 const unsigned char *name)
{
	int serial = -1;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct ion_client *client = rb_entry(node, struct ion_client,
						     node);

		if (strcmp(client->name, name))
			continue;
		serial = max(serial, client->display_serial);
	}
	return serial + 1;
}
struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		goto err_put_task_struct;

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->task = task;
	client->pid = pid;
	client->name = kstrdup(name, GFP_KERNEL);
	if (!client->name)
		goto err_free_client;

	down_write(&dev->lock);
	client->display_serial = ion_get_client_serial(&dev->clients, name);
	client->display_name = kasprintf(
		GFP_KERNEL, "%s-%d", name, client->display_serial);
	if (!client->display_name) {
		up_write(&dev->lock);
		goto err_free_client_name;
	}
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(client->display_name, 0664,
						 dev->clients_debug_root,
						 client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;

		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
		       path, client->display_name);
	}

	up_write(&dev->lock);

	return client;

err_free_client_name:
	kfree(client->name);
err_free_client:
	kfree(client);
err_put_task_struct:
	if (task)
		put_task_struct(current->group_leader);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);
void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	mutex_lock(&debugfs_mutex);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client->display_name);
	kfree(client->name);
	kfree(client);
	mutex_unlock(&debugfs_mutex);
}
EXPORT_SYMBOL(ion_client_destroy);
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}
static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}
void ion_pages_sync_for_device(struct device *dev, struct page *page,
			       size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
	 * for the targeted device, but this works on the currently targeted
	 * hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}
struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};
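/*
 * For cached buffers without explicit sync, user mappings are populated on
 * demand via ion_vm_fault and each faulted page is marked dirty.  Before
 * the buffer is handed to a device, only the dirty pages are synced and
 * all user mappings are zapped, so subsequent CPU accesses fault again and
 * re-mark the pages they touch.
 */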
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
						  PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}
static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}
static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}
static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}
static const struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
							VM_DONTDUMP;
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}
static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	return PTR_ERR_OR_ZERO(vaddr);
}

static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);

	return 0;
}
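/*
 * These ops back every dma-buf exported by ion; dmabuf->priv always points
 * at the underlying struct ion_buffer, so the hooks above simply translate
 * dma-buf operations into buffer refcount, kmap and mmap calls.
 */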
static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};
static struct dma_buf *__ion_share_dma_buf(struct ion_client *client,
					   struct ion_handle *handle,
					   bool lock_client)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	if (lock_client)
		mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		if (lock_client)
			mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	if (lock_client)
		mutex_unlock(&client->lock);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	return __ion_share_dma_buf(client, handle, true);
}
EXPORT_SYMBOL(ion_share_dma_buf);
static int __ion_share_dma_buf_fd(struct ion_client *client,
				  struct ion_handle *handle, bool lock_client)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = __ion_share_dma_buf(client, handle, lock_client);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	return __ion_share_dma_buf_fd(client, handle, true);
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);

int ion_share_dma_buf_fd_nolock(struct ion_client *client,
				struct ion_handle *handle)
{
	return __ion_share_dma_buf_fd(client, handle, false);
}
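/*
 * Importing only succeeds for dma-bufs that ion itself exported.  If the
 * client already holds a handle to the underlying buffer, that handle is
 * reused with an extra reference instead of creating a duplicate.
 */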
struct ion_handle *ion_import_dma_buf(struct ion_client *client,
				      struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		handle = ion_handle_get_check_overflow(handle);
		mutex_unlock(&client->lock);
		goto end;
	}

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle)) {
		mutex_unlock(&client->lock);
		goto end;
	}

	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	handle = ion_import_dma_buf(client, dmabuf);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf_fd);
int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}
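/*
 * ION_IOC_HEAP_QUERY is a two-step protocol: a call that supplies no
 * destination array just reports the number of registered heaps in
 * query->cnt, and a second call with a large enough array receives one
 * ion_heap_data record per heap.
 */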
int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
{
	struct ion_device *dev = client->dev;
	struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
	int ret = -EINVAL, cnt = 0, max_cnt;
	struct ion_heap *heap;
	struct ion_heap_data hdata;

	memset(&hdata, 0, sizeof(hdata));

	down_read(&dev->lock);
	if (!buffer) {
		query->cnt = dev->heap_cnt;
		ret = 0;
		goto out;
	}

	if (query->cnt <= 0)
		goto out;

	max_cnt = query->cnt;

	plist_for_each_entry(heap, &dev->heaps, node) {
		strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
		hdata.name[sizeof(hdata.name) - 1] = '\0';
		hdata.type = heap->type;
		hdata.heap_id = heap->id;

		if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
			ret = -EFAULT;
			goto out;
		}

		cnt++;
		if (cnt >= max_cnt)
			break;
	}

	query->cnt = cnt;
	ret = 0;
out:
	up_read(&dev->lock);

	return ret;
}
static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}
static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];

	pr_debug("%s: %d\n", __func__, __LINE__);
	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = ion_client_create(dev, debug_name);
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}
static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl = compat_ion_ioctl,
};
static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}
static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_puts(s, "----------------------------------------------------\n");

	mutex_lock(&debugfs_mutex);
	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	mutex_unlock(&debugfs_mutex);

	seq_puts(s, "----------------------------------------------------\n");
	seq_puts(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_puts(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
			   heap->free_list_size);
	seq_puts(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}
static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
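/*
 * Writing N to a heap's <name>_shrink debugfs file asks the heap's
 * shrinker to scan (and free) up to N objects; writing 0 frees everything
 * it currently can.  Reading the file reports how many objects the
 * shrinker could reclaim right now.
 */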
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = val;

	if (!val) {
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	}

	heap->shrinker.scan_objects(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct dentry *debug_file;

	if (!heap->ops->allocate || !heap->ops->free)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	spin_lock_init(&heap->free_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
		ion_heap_init_shrinker(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debug_file = debugfs_create_file(heap->name, 0664,
					 dev->heaps_debug_root, heap,
					 &debug_heap_fops);

	if (!debug_file) {
		char buf[256], *path;

		path = dentry_path(dev->heaps_debug_root, buf, 256);
		pr_err("Failed to create heap debugfs at %s/%s\n",
		       path, heap->name);
	}

	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debug_file = debugfs_create_file(
			debug_name, 0644, dev->heaps_debug_root, heap,
			&debug_shrink_fops);
		if (!debug_file) {
			char buf[256], *path;

			path = dentry_path(dev->heaps_debug_root, buf, 256);
			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
			       path, debug_name);
		}
	}

	dev->heap_cnt++;
	up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root) {
		pr_err("ion: failed to create debugfs root directory.\n");
		goto debugfs_done;
	}
	idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
	if (!idev->heaps_debug_root) {
		pr_err("ion: failed to create debugfs heaps directory.\n");
		goto debugfs_done;
	}
	idev->clients_debug_root = debugfs_create_dir("clients",
						idev->debug_root);
	if (!idev->clients_debug_root)
		pr_err("ion: failed to create debugfs clients directory.\n");

debugfs_done:

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	ion_root_client = &idev->clients;
	mutex_init(&debugfs_mutex);
	return idev;
}
EXPORT_SYMBOL(ion_device_create);
void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	debugfs_remove_recursive(dev->debug_root);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}
EXPORT_SYMBOL(ion_device_destroy);