3 * drivers/staging/android/ion/ion.c
5 * Copyright (C) 2011 Google, Inc.
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
18 #include <linux/atomic.h>
19 #include <linux/device.h>
20 #include <linux/err.h>
21 #include <linux/file.h>
22 #include <linux/freezer.h>
24 #include <linux/anon_inodes.h>
25 #include <linux/kthread.h>
26 #include <linux/list.h>
27 #include <linux/memblock.h>
28 #include <linux/miscdevice.h>
29 #include <linux/export.h>
31 #include <linux/mm_types.h>
32 #include <linux/rbtree.h>
33 #include <linux/slab.h>
34 #include <linux/seq_file.h>
35 #include <linux/uaccess.h>
36 #include <linux/vmalloc.h>
37 #include <linux/debugfs.h>
38 #include <linux/dma-buf.h>
39 #include <linux/idr.h>
43 #include "compat_ion.h"
46 * struct ion_device - the metadata of the ion device node
47 * @dev: the actual misc device
48 * @buffers: an rb tree of all the existing buffers
49 * @buffer_lock: lock protecting the tree of buffers
50 * @lock: rwsem protecting the tree of heaps and clients
51 * @heaps: list of all the heaps in the system
52 * @clients: an rb tree of all the clients in the system
55 struct miscdevice dev;
56 struct rb_root buffers;
57 struct mutex buffer_lock;
58 struct rw_semaphore lock;
59 struct plist_head heaps;
60 long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
62 struct rb_root clients;
63 struct dentry *debug_root;
64 struct dentry *heaps_debug_root;
65 struct dentry *clients_debug_root;
69 * struct ion_client - a process/hw block local address space
70 * @node: node in the tree of all clients
71 * @dev: backpointer to ion device
72 * @handles: an rb tree of all the handles in this client
73 * @idr: an idr space for allocating handle ids
74 * @lock: lock protecting the tree of handles
75 * @name: used for debugging
76 * @display_name: used for debugging (unique version of @name)
77 * @display_serial: used for debugging (to make display_name unique)
78 * @task: used for debugging
80 * A client represents a list of buffers this client may access.
81 * The mutex stored here is used to protect both the tree of handles
82 * and the handles themselves, and should be held while modifying either.
86 struct ion_device *dev;
87 struct rb_root handles;
93 struct task_struct *task;
95 struct dentry *debug_root;
99 * ion_handle - a client local reference to a buffer
100 * @ref: reference count
101 * @client: back pointer to the client the buffer resides in
102 * @buffer: pointer to the buffer
103 * @node: node in the client's handle rbtree
104 * @kmap_cnt: count of times this client has mapped to kernel
105 * @id: client-unique id allocated by client->idr
107 * Modifications to node, kmap_cnt or mapping should be protected by the
108 * lock in the client. Other fields are never changed after initialization.
112 struct ion_client *client;
113 struct ion_buffer *buffer;
115 unsigned int kmap_cnt;
119 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
121 return (buffer->flags & ION_FLAG_CACHED) &&
122 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
125 bool ion_buffer_cached(struct ion_buffer *buffer)
127 return !!(buffer->flags & ION_FLAG_CACHED);
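/*
 * The helpers below keep per-page dirty state for buffers whose user
 * mappings are set up at fault time: bit 0 of each entry in buffer->pages
 * is reused as a "dirty" flag, so the real struct page pointer must be
 * recovered with ion_buffer_page() before use.  This works because
 * struct page pointers are always at least word aligned.
 */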
130 static inline struct page *ion_buffer_page(struct page *page)
132 return (struct page *)((unsigned long)page & ~(1UL));
135 static inline bool ion_buffer_page_is_dirty(struct page *page)
137 return !!((unsigned long)page & 1UL);
140 static inline void ion_buffer_page_dirty(struct page **page)
142 *page = (struct page *)((unsigned long)(*page) | 1UL);
145 static inline void ion_buffer_page_clean(struct page **page)
147 *page = (struct page *)((unsigned long)(*page) & ~(1UL));
150 /* this function should only be called while dev->lock is held */
151 static void ion_buffer_add(struct ion_device *dev,
152 struct ion_buffer *buffer)
154 struct rb_node **p = &dev->buffers.rb_node;
155 struct rb_node *parent = NULL;
156 struct ion_buffer *entry;
160 entry = rb_entry(parent, struct ion_buffer, node);
162 if (buffer < entry) {
164 } else if (buffer > entry) {
167 pr_err("%s: buffer already found.\n", __func__);
172 rb_link_node(&buffer->node, parent, p);
173 rb_insert_color(&buffer->node, &dev->buffers);
176 /* this function should only be called while dev->lock is held */
177 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
178 struct ion_device *dev,
183 struct ion_buffer *buffer;
184 struct sg_table *table;
185 struct scatterlist *sg;
188 buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
190 return ERR_PTR(-ENOMEM);
193 buffer->flags = flags;
194 kref_init(&buffer->ref);
196 ret = heap->ops->allocate(heap, buffer, len, align, flags);
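	/*
	 * If the first allocation attempt fails on a heap that frees buffers
	 * asynchronously (ION_HEAP_FLAG_DEFER_FREE), drain the heap's
	 * freelist and retry the allocation once before giving up.
	 */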
199 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
202 ion_heap_freelist_drain(heap, 0);
203 ret = heap->ops->allocate(heap, buffer, len, align,
212 table = heap->ops->map_dma(heap, buffer);
213 if (WARN_ONCE(table == NULL,
214 "heap->ops->map_dma should return ERR_PTR on error"))
215 table = ERR_PTR(-EINVAL);
221 buffer->sg_table = table;
222 if (ion_buffer_fault_user_mappings(buffer)) {
223 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
224 struct scatterlist *sg;
227 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
228 if (!buffer->pages) {
233 for_each_sg(table->sgl, sg, table->nents, i) {
234 struct page *page = sg_page(sg);
236 for (j = 0; j < sg->length / PAGE_SIZE; j++)
237 buffer->pages[k++] = page++;
243 INIT_LIST_HEAD(&buffer->vmas);
244 mutex_init(&buffer->lock);
246 * this will set up dma addresses for the sglist -- it is not
247 * technically correct as per the dma api -- a specific
248 * device isn't really taking ownership here. However, in practice on
249 * our systems the only dma_address space is physical addresses.
250 * Additionally, we can't afford the overhead of invalidating every
251 * allocation via dma_map_sg. The implicit contract here is that
252 * memory coming from the heaps is ready for dma, i.e. if it has a
253 * cached mapping, that mapping has been invalidated
255 for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
256 sg_dma_address(sg) = sg_phys(sg);
257 sg_dma_len(sg) = sg->length;
259 mutex_lock(&dev->buffer_lock);
260 ion_buffer_add(dev, buffer);
261 mutex_unlock(&dev->buffer_lock);
265 heap->ops->unmap_dma(heap, buffer);
267 heap->ops->free(buffer);
273 void ion_buffer_destroy(struct ion_buffer *buffer)
275 if (WARN_ON(buffer->kmap_cnt > 0))
276 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
277 buffer->heap->ops->unmap_dma(buffer->heap, buffer);
278 buffer->heap->ops->free(buffer);
279 vfree(buffer->pages);
283 static void _ion_buffer_destroy(struct kref *kref)
285 struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
286 struct ion_heap *heap = buffer->heap;
287 struct ion_device *dev = buffer->dev;
289 mutex_lock(&dev->buffer_lock);
290 rb_erase(&buffer->node, &dev->buffers);
291 mutex_unlock(&dev->buffer_lock);
293 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
294 ion_heap_freelist_add(heap, buffer);
296 ion_buffer_destroy(buffer);
299 static void ion_buffer_get(struct ion_buffer *buffer)
301 kref_get(&buffer->ref);
304 static int ion_buffer_put(struct ion_buffer *buffer)
306 return kref_put(&buffer->ref, _ion_buffer_destroy);
309 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
311 mutex_lock(&buffer->lock);
312 buffer->handle_count++;
313 mutex_unlock(&buffer->lock);
316 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
319 * when a buffer is removed from a handle, if it is not in
320 * any other handles, copy the taskcomm and the pid of the
321 * process it's being removed from into the buffer. At this
322 * point there will be no way to track what processes this buffer is
323 * being used by, it only exists as a dma_buf file descriptor.
324 * The taskcomm and pid can provide a debug hint as to where this fd is in the system.
327 mutex_lock(&buffer->lock);
328 buffer->handle_count--;
329 BUG_ON(buffer->handle_count < 0);
330 if (!buffer->handle_count) {
331 struct task_struct *task;
333 task = current->group_leader;
334 get_task_comm(buffer->task_comm, task);
335 buffer->pid = task_pid_nr(task);
337 mutex_unlock(&buffer->lock);
340 static struct ion_handle *ion_handle_create(struct ion_client *client,
341 struct ion_buffer *buffer)
343 struct ion_handle *handle;
345 handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
347 return ERR_PTR(-ENOMEM);
348 kref_init(&handle->ref);
349 RB_CLEAR_NODE(&handle->node);
350 handle->client = client;
351 ion_buffer_get(buffer);
352 ion_buffer_add_to_handle(buffer);
353 handle->buffer = buffer;
358 static void ion_handle_kmap_put(struct ion_handle *);
360 static void ion_handle_destroy(struct kref *kref)
362 struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
363 struct ion_client *client = handle->client;
364 struct ion_buffer *buffer = handle->buffer;
366 mutex_lock(&buffer->lock);
367 while (handle->kmap_cnt)
368 ion_handle_kmap_put(handle);
369 mutex_unlock(&buffer->lock);
371 idr_remove(&client->idr, handle->id);
372 if (!RB_EMPTY_NODE(&handle->node))
373 rb_erase(&handle->node, &client->handles);
375 ion_buffer_remove_from_handle(buffer);
376 ion_buffer_put(buffer);
381 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
383 return handle->buffer;
386 static void ion_handle_get(struct ion_handle *handle)
388 kref_get(&handle->ref);
391 /* Must hold the client lock */
392 static struct ion_handle *ion_handle_get_check_overflow(
393 struct ion_handle *handle)
395 if (atomic_read(&handle->ref.refcount) + 1 == 0)
396 return ERR_PTR(-EOVERFLOW);
397 ion_handle_get(handle);
401 static int ion_handle_put_nolock(struct ion_handle *handle)
405 ret = kref_put(&handle->ref, ion_handle_destroy);
410 int ion_handle_put(struct ion_handle *handle)
412 struct ion_client *client = handle->client;
415 mutex_lock(&client->lock);
416 ret = ion_handle_put_nolock(handle);
417 mutex_unlock(&client->lock);
422 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
423 struct ion_buffer *buffer)
425 struct rb_node *n = client->handles.rb_node;
428 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
430 if (buffer < entry->buffer)
432 else if (buffer > entry->buffer)
437 return ERR_PTR(-EINVAL);
440 static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
443 struct ion_handle *handle;
445 handle = idr_find(&client->idr, id);
447 return ion_handle_get_check_overflow(handle);
449 return ERR_PTR(-EINVAL);
452 static bool ion_handle_validate(struct ion_client *client,
453 struct ion_handle *handle)
455 WARN_ON(!mutex_is_locked(&client->lock));
456 return idr_find(&client->idr, handle->id) == handle;
459 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
462 struct rb_node **p = &client->handles.rb_node;
463 struct rb_node *parent = NULL;
464 struct ion_handle *entry;
466 id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
474 entry = rb_entry(parent, struct ion_handle, node);
476 if (handle->buffer < entry->buffer)
478 else if (handle->buffer > entry->buffer)
481 WARN(1, "%s: buffer already found.", __func__);
484 rb_link_node(&handle->node, parent, p);
485 rb_insert_color(&handle->node, &client->handles);
490 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
491 size_t align, unsigned int heap_id_mask,
494 struct ion_handle *handle;
495 struct ion_device *dev = client->dev;
496 struct ion_buffer *buffer = NULL;
497 struct ion_heap *heap;
500 pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
501 len, align, heap_id_mask, flags);
503 * traverse the list of heaps available in this system in priority
504 * order. If the heap type is supported by the client and matches the
505 * request of the caller, allocate from it. Repeat until allocate has
506 * succeeded or all heaps have been tried
508 len = PAGE_ALIGN(len);
511 return ERR_PTR(-EINVAL);
513 down_read(&dev->lock);
514 plist_for_each_entry(heap, &dev->heaps, node) {
515 /* if the caller didn't specify this heap id */
516 if (!((1 << heap->id) & heap_id_mask))
518 buffer = ion_buffer_create(heap, dev, len, align, flags);
525 return ERR_PTR(-ENODEV);
528 return ERR_CAST(buffer);
530 handle = ion_handle_create(client, buffer);
533 * ion_buffer_create will create a buffer with a ref_cnt of 1,
534 * and ion_handle_create will take a second reference, drop one here
536 ion_buffer_put(buffer);
541 mutex_lock(&client->lock);
542 ret = ion_handle_add(client, handle);
543 mutex_unlock(&client->lock);
545 ion_handle_put(handle);
546 handle = ERR_PTR(ret);
551 EXPORT_SYMBOL(ion_alloc);
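/*
 * Illustrative in-kernel usage of the client API (a sketch, not code in
 * this driver; the variable names, size and heap mask are placeholders):
 *
 *	client = ion_client_create(idev, "my-driver");
 *	handle = ion_alloc(client, SZ_64K, PAGE_SIZE, heap_id_mask, 0);
 *	vaddr  = ion_map_kernel(client, handle);
 *	...use the buffer...
 *	ion_unmap_kernel(client, handle);
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */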
553 static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
557 BUG_ON(client != handle->client);
559 valid_handle = ion_handle_validate(client, handle);
562 WARN(1, "%s: invalid handle passed to free.\n", __func__);
565 ion_handle_put_nolock(handle);
568 void ion_free(struct ion_client *client, struct ion_handle *handle)
570 BUG_ON(client != handle->client);
572 mutex_lock(&client->lock);
573 ion_free_nolock(client, handle);
574 mutex_unlock(&client->lock);
576 EXPORT_SYMBOL(ion_free);
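/*
 * Note: ion_phys() can only succeed for heaps that implement the optional
 * ->phys op, i.e. heaps backed by physically contiguous memory; for other
 * heaps it fails with an error, as the check below shows.
 */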
578 int ion_phys(struct ion_client *client, struct ion_handle *handle,
579 ion_phys_addr_t *addr, size_t *len)
581 struct ion_buffer *buffer;
584 mutex_lock(&client->lock);
585 if (!ion_handle_validate(client, handle)) {
586 mutex_unlock(&client->lock);
590 buffer = handle->buffer;
592 if (!buffer->heap->ops->phys) {
593 pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
594 __func__, buffer->heap->name, buffer->heap->type);
595 mutex_unlock(&client->lock);
598 mutex_unlock(&client->lock);
599 ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
602 EXPORT_SYMBOL(ion_phys);
604 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
608 if (buffer->kmap_cnt) {
609 if (buffer->kmap_cnt == INT_MAX)
610 return ERR_PTR(-EOVERFLOW);
613 return buffer->vaddr;
615 vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
616 if (WARN_ONCE(vaddr == NULL,
617 "heap->ops->map_kernel should return ERR_PTR on error"))
618 return ERR_PTR(-EINVAL);
621 buffer->vaddr = vaddr;
626 static void *ion_handle_kmap_get(struct ion_handle *handle)
628 struct ion_buffer *buffer = handle->buffer;
631 if (handle->kmap_cnt) {
632 if (handle->kmap_cnt == INT_MAX)
633 return ERR_PTR(-EOVERFLOW);
636 return buffer->vaddr;
638 vaddr = ion_buffer_kmap_get(buffer);
645 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
648 if (!buffer->kmap_cnt) {
649 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
650 buffer->vaddr = NULL;
654 static void ion_handle_kmap_put(struct ion_handle *handle)
656 struct ion_buffer *buffer = handle->buffer;
658 if (!handle->kmap_cnt) {
659 WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
663 if (!handle->kmap_cnt)
664 ion_buffer_kmap_put(buffer);
667 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
669 struct ion_buffer *buffer;
672 mutex_lock(&client->lock);
673 if (!ion_handle_validate(client, handle)) {
674 pr_err("%s: invalid handle passed to map_kernel.\n",
676 mutex_unlock(&client->lock);
677 return ERR_PTR(-EINVAL);
680 buffer = handle->buffer;
682 if (!handle->buffer->heap->ops->map_kernel) {
683 pr_err("%s: map_kernel is not implemented by this heap.\n",
685 mutex_unlock(&client->lock);
686 return ERR_PTR(-ENODEV);
689 mutex_lock(&buffer->lock);
690 vaddr = ion_handle_kmap_get(handle);
691 mutex_unlock(&buffer->lock);
692 mutex_unlock(&client->lock);
695 EXPORT_SYMBOL(ion_map_kernel);
697 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
699 struct ion_buffer *buffer;
701 mutex_lock(&client->lock);
702 buffer = handle->buffer;
703 mutex_lock(&buffer->lock);
704 ion_handle_kmap_put(handle);
705 mutex_unlock(&buffer->lock);
706 mutex_unlock(&client->lock);
708 EXPORT_SYMBOL(ion_unmap_kernel);
710 static int ion_debug_client_show(struct seq_file *s, void *unused)
712 struct ion_client *client = s->private;
714 size_t sizes[ION_NUM_HEAP_IDS] = {0};
715 const char *names[ION_NUM_HEAP_IDS] = {NULL};
718 mutex_lock(&client->lock);
719 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
720 struct ion_handle *handle = rb_entry(n, struct ion_handle,
722 unsigned int id = handle->buffer->heap->id;
725 names[id] = handle->buffer->heap->name;
726 sizes[id] += handle->buffer->size;
728 mutex_unlock(&client->lock);
730 seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
731 for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
734 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
739 static int ion_debug_client_open(struct inode *inode, struct file *file)
741 return single_open(file, ion_debug_client_show, inode->i_private);
744 static const struct file_operations debug_client_fops = {
745 .open = ion_debug_client_open,
748 .release = single_release,
751 static int ion_get_client_serial(const struct rb_root *root,
752 const unsigned char *name)
755 struct rb_node *node;
757 for (node = rb_first(root); node; node = rb_next(node)) {
758 struct ion_client *client = rb_entry(node, struct ion_client,
761 if (strcmp(client->name, name))
763 serial = max(serial, client->display_serial);
768 struct ion_client *ion_client_create(struct ion_device *dev,
771 struct ion_client *client;
772 struct task_struct *task;
774 struct rb_node *parent = NULL;
775 struct ion_client *entry;
779 pr_err("%s: Name cannot be null\n", __func__);
780 return ERR_PTR(-EINVAL);
783 get_task_struct(current->group_leader);
784 task_lock(current->group_leader);
785 pid = task_pid_nr(current->group_leader);
787 * don't bother to store task struct for kernel threads,
788 * they can't be killed anyway
790 if (current->group_leader->flags & PF_KTHREAD) {
791 put_task_struct(current->group_leader);
794 task = current->group_leader;
796 task_unlock(current->group_leader);
798 client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
800 goto err_put_task_struct;
803 client->handles = RB_ROOT;
804 idr_init(&client->idr);
805 mutex_init(&client->lock);
808 client->name = kstrdup(name, GFP_KERNEL);
810 goto err_free_client;
812 down_write(&dev->lock);
813 client->display_serial = ion_get_client_serial(&dev->clients, name);
814 client->display_name = kasprintf(
815 GFP_KERNEL, "%s-%d", name, client->display_serial);
816 if (!client->display_name) {
817 up_write(&dev->lock);
818 goto err_free_client_name;
820 p = &dev->clients.rb_node;
823 entry = rb_entry(parent, struct ion_client, node);
827 else if (client > entry)
830 rb_link_node(&client->node, parent, p);
831 rb_insert_color(&client->node, &dev->clients);
833 client->debug_root = debugfs_create_file(client->display_name, 0664,
834 dev->clients_debug_root,
835 client, &debug_client_fops);
836 if (!client->debug_root) {
837 char buf[256], *path;
839 path = dentry_path(dev->clients_debug_root, buf, 256);
840 pr_err("Failed to create client debugfs at %s/%s\n",
841 path, client->display_name);
844 up_write(&dev->lock);
848 err_free_client_name:
854 put_task_struct(current->group_leader);
855 return ERR_PTR(-ENOMEM);
857 EXPORT_SYMBOL(ion_client_create);
859 void ion_client_destroy(struct ion_client *client)
861 struct ion_device *dev = client->dev;
864 pr_debug("%s: %d\n", __func__, __LINE__);
865 while ((n = rb_first(&client->handles))) {
866 struct ion_handle *handle = rb_entry(n, struct ion_handle,
868 ion_handle_destroy(&handle->ref);
871 idr_destroy(&client->idr);
873 down_write(&dev->lock);
875 put_task_struct(client->task);
876 rb_erase(&client->node, &dev->clients);
877 debugfs_remove_recursive(client->debug_root);
878 up_write(&dev->lock);
880 kfree(client->display_name);
884 EXPORT_SYMBOL(ion_client_destroy);
886 struct sg_table *ion_sg_table(struct ion_client *client,
887 struct ion_handle *handle)
889 struct ion_buffer *buffer;
890 struct sg_table *table;
892 mutex_lock(&client->lock);
893 if (!ion_handle_validate(client, handle)) {
894 pr_err("%s: invalid handle passed to map_dma.\n",
896 mutex_unlock(&client->lock);
897 return ERR_PTR(-EINVAL);
899 buffer = handle->buffer;
900 table = buffer->sg_table;
901 mutex_unlock(&client->lock);
904 EXPORT_SYMBOL(ion_sg_table);
906 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
908 enum dma_data_direction direction);
910 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
911 enum dma_data_direction direction)
913 struct dma_buf *dmabuf = attachment->dmabuf;
914 struct ion_buffer *buffer = dmabuf->priv;
916 ion_buffer_sync_for_device(buffer, attachment->dev, direction);
917 return buffer->sg_table;
920 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
921 struct sg_table *table,
922 enum dma_data_direction direction)
926 void ion_pages_sync_for_device(struct device *dev, struct page *page,
927 size_t size, enum dma_data_direction dir)
929 struct scatterlist sg;
931 sg_init_table(&sg, 1);
932 sg_set_page(&sg, page, size, 0);
934 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
935 * for the targeted device, but this works on the currently targeted hardware.
938 sg_dma_address(&sg) = page_to_phys(page);
939 dma_sync_sg_for_device(dev, &sg, 1, dir);
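/*
 * For buffers with fault-time user mappings (see
 * ion_buffer_fault_user_mappings()), ion_buffer_sync_for_device() below
 * walks the per-page dirty bits, performs a dma sync on only the pages
 * that have been touched through a userspace mapping, clears their dirty
 * bits, and then zaps the existing user mappings so the next CPU access
 * faults the pages back in (and marks them dirty again in ion_vm_fault()).
 */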
942 struct ion_vma_list {
943 struct list_head list;
944 struct vm_area_struct *vma;
947 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
949 enum dma_data_direction dir)
951 struct ion_vma_list *vma_list;
952 int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
955 pr_debug("%s: syncing for device %s\n", __func__,
956 dev ? dev_name(dev) : "null");
958 if (!ion_buffer_fault_user_mappings(buffer))
961 mutex_lock(&buffer->lock);
962 for (i = 0; i < pages; i++) {
963 struct page *page = buffer->pages[i];
965 if (ion_buffer_page_is_dirty(page))
966 ion_pages_sync_for_device(dev, ion_buffer_page(page),
969 ion_buffer_page_clean(buffer->pages + i);
971 list_for_each_entry(vma_list, &buffer->vmas, list) {
972 struct vm_area_struct *vma = vma_list->vma;
974 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
977 mutex_unlock(&buffer->lock);
980 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
982 struct ion_buffer *buffer = vma->vm_private_data;
986 mutex_lock(&buffer->lock);
987 ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
988 BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
990 pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
991 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
992 mutex_unlock(&buffer->lock);
994 return VM_FAULT_ERROR;
996 return VM_FAULT_NOPAGE;
999 static void ion_vm_open(struct vm_area_struct *vma)
1001 struct ion_buffer *buffer = vma->vm_private_data;
1002 struct ion_vma_list *vma_list;
1004 vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
1007 vma_list->vma = vma;
1008 mutex_lock(&buffer->lock);
1009 list_add(&vma_list->list, &buffer->vmas);
1010 mutex_unlock(&buffer->lock);
1011 pr_debug("%s: adding %p\n", __func__, vma);
1014 static void ion_vm_close(struct vm_area_struct *vma)
1016 struct ion_buffer *buffer = vma->vm_private_data;
1017 struct ion_vma_list *vma_list, *tmp;
1019 pr_debug("%s\n", __func__);
1020 mutex_lock(&buffer->lock);
1021 list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
1022 if (vma_list->vma != vma)
1024 list_del(&vma_list->list);
1026 pr_debug("%s: deleting %p\n", __func__, vma);
1029 mutex_unlock(&buffer->lock);
1032 static const struct vm_operations_struct ion_vma_ops = {
1033 .open = ion_vm_open,
1034 .close = ion_vm_close,
1035 .fault = ion_vm_fault,
1038 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1040 struct ion_buffer *buffer = dmabuf->priv;
1043 if (!buffer->heap->ops->map_user) {
1044 pr_err("%s: this heap does not define a method for mapping to userspace\n",
1049 if (ion_buffer_fault_user_mappings(buffer)) {
1050 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
1052 vma->vm_private_data = buffer;
1053 vma->vm_ops = &ion_vma_ops;
1058 if (!(buffer->flags & ION_FLAG_CACHED))
1059 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1061 mutex_lock(&buffer->lock);
1062 /* now map it to userspace */
1063 ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1064 mutex_unlock(&buffer->lock);
1067 pr_err("%s: failure mapping buffer to userspace\n",
1073 static void ion_dma_buf_release(struct dma_buf *dmabuf)
1075 struct ion_buffer *buffer = dmabuf->priv;
1077 ion_buffer_put(buffer);
1080 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1082 struct ion_buffer *buffer = dmabuf->priv;
1084 return buffer->vaddr + offset * PAGE_SIZE;
1087 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1092 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1094 enum dma_data_direction direction)
1096 struct ion_buffer *buffer = dmabuf->priv;
1099 if (!buffer->heap->ops->map_kernel) {
1100 pr_err("%s: map kernel is not implemented by this heap.\n",
1105 mutex_lock(&buffer->lock);
1106 vaddr = ion_buffer_kmap_get(buffer);
1107 mutex_unlock(&buffer->lock);
1108 return PTR_ERR_OR_ZERO(vaddr);
1111 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1113 enum dma_data_direction direction)
1115 struct ion_buffer *buffer = dmabuf->priv;
1117 mutex_lock(&buffer->lock);
1118 ion_buffer_kmap_put(buffer);
1119 mutex_unlock(&buffer->lock);
1122 static struct dma_buf_ops dma_buf_ops = {
1123 .map_dma_buf = ion_map_dma_buf,
1124 .unmap_dma_buf = ion_unmap_dma_buf,
1126 .release = ion_dma_buf_release,
1127 .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1128 .end_cpu_access = ion_dma_buf_end_cpu_access,
1129 .kmap_atomic = ion_dma_buf_kmap,
1130 .kunmap_atomic = ion_dma_buf_kunmap,
1131 .kmap = ion_dma_buf_kmap,
1132 .kunmap = ion_dma_buf_kunmap,
1135 static struct dma_buf *__ion_share_dma_buf(struct ion_client *client,
1136 struct ion_handle *handle,
1139 DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
1140 struct ion_buffer *buffer;
1141 struct dma_buf *dmabuf;
1145 mutex_lock(&client->lock);
1146 valid_handle = ion_handle_validate(client, handle);
1147 if (!valid_handle) {
1148 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1150 mutex_unlock(&client->lock);
1151 return ERR_PTR(-EINVAL);
1153 buffer = handle->buffer;
1154 ion_buffer_get(buffer);
1156 mutex_unlock(&client->lock);
1158 exp_info.ops = &dma_buf_ops;
1159 exp_info.size = buffer->size;
1160 exp_info.flags = O_RDWR;
1161 exp_info.priv = buffer;
1163 dmabuf = dma_buf_export(&exp_info);
1164 if (IS_ERR(dmabuf)) {
1165 ion_buffer_put(buffer);
1172 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1173 struct ion_handle *handle)
1175 return __ion_share_dma_buf(client, handle, true);
1177 EXPORT_SYMBOL(ion_share_dma_buf);
1179 static int __ion_share_dma_buf_fd(struct ion_client *client,
1180 struct ion_handle *handle, bool lock_client)
1182 struct dma_buf *dmabuf;
1185 dmabuf = __ion_share_dma_buf(client, handle, lock_client);
1187 return PTR_ERR(dmabuf);
1189 fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1191 dma_buf_put(dmabuf);
1196 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1198 return __ion_share_dma_buf_fd(client, handle, true);
1200 EXPORT_SYMBOL(ion_share_dma_buf_fd);
1202 static int ion_share_dma_buf_fd_nolock(struct ion_client *client,
1203 struct ion_handle *handle)
1205 return __ion_share_dma_buf_fd(client, handle, false);
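/*
 * Sharing/importing sketch (illustrative only): a buffer allocated by one
 * client can be handed to another client or process as a dma-buf file
 * descriptor and re-imported on the other side:
 *
 *	fd = ion_share_dma_buf_fd(client_a, handle_a);
 *	...pass fd over binder, a socket, etc...
 *	handle_b = ion_import_dma_buf(client_b, fd);
 *
 * ion_import_dma_buf() only accepts dma-bufs exported by ion itself, and
 * returns the existing handle if this client already has one for the
 * buffer.
 */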
1208 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1210 struct dma_buf *dmabuf;
1211 struct ion_buffer *buffer;
1212 struct ion_handle *handle;
1215 dmabuf = dma_buf_get(fd);
1217 return ERR_CAST(dmabuf);
1218 /* if this memory came from ion */
1220 if (dmabuf->ops != &dma_buf_ops) {
1221 pr_err("%s: can not import dmabuf from another exporter\n",
1223 dma_buf_put(dmabuf);
1224 return ERR_PTR(-EINVAL);
1226 buffer = dmabuf->priv;
1228 mutex_lock(&client->lock);
1229 /* if a handle exists for this buffer just take a reference to it */
1230 handle = ion_handle_lookup(client, buffer);
1231 if (!IS_ERR(handle)) {
1232 handle = ion_handle_get_check_overflow(handle);
1233 mutex_unlock(&client->lock);
1237 handle = ion_handle_create(client, buffer);
1238 if (IS_ERR(handle)) {
1239 mutex_unlock(&client->lock);
1243 ret = ion_handle_add(client, handle);
1244 mutex_unlock(&client->lock);
1246 ion_handle_put(handle);
1247 handle = ERR_PTR(ret);
1251 dma_buf_put(dmabuf);
1254 EXPORT_SYMBOL(ion_import_dma_buf);
1256 static int ion_sync_for_device(struct ion_client *client, int fd)
1258 struct dma_buf *dmabuf;
1259 struct ion_buffer *buffer;
1261 dmabuf = dma_buf_get(fd);
1263 return PTR_ERR(dmabuf);
1265 /* if this memory came from ion */
1266 if (dmabuf->ops != &dma_buf_ops) {
1267 pr_err("%s: can not sync dmabuf from another exporter\n",
1269 dma_buf_put(dmabuf);
1272 buffer = dmabuf->priv;
1274 dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1275 buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1276 dma_buf_put(dmabuf);
1280 /* fix up the cases where the ioctl direction bits are incorrect */
1281 static unsigned int ion_ioctl_dir(unsigned int cmd)
1286 case ION_IOC_CUSTOM:
1289 return _IOC_DIR(cmd);
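/*
 * Userspace reaches the driver through /dev/ion.  An illustrative
 * allocation sequence (error handling omitted, sizes and mask are just
 * example values):
 *
 *	struct ion_allocation_data alloc = {
 *		.len = 4096,
 *		.align = 4096,
 *		.heap_id_mask = ~0u,
 *		.flags = 0,
 *	};
 *	struct ion_fd_data share;
 *
 *	ionfd = open("/dev/ion", O_RDWR);
 *	ioctl(ionfd, ION_IOC_ALLOC, &alloc);
 *	share.handle = alloc.handle;
 *	ioctl(ionfd, ION_IOC_SHARE, &share);
 *	buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   share.fd, 0);
 *
 * The handle is released with ION_IOC_FREE (struct ion_handle_data) and
 * the shared buffer with close() on the dma-buf fd.
 */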
1293 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1295 struct ion_client *client = filp->private_data;
1296 struct ion_device *dev = client->dev;
1297 struct ion_handle *cleanup_handle = NULL;
1302 struct ion_fd_data fd;
1303 struct ion_allocation_data allocation;
1304 struct ion_handle_data handle;
1305 struct ion_custom_data custom;
1308 dir = ion_ioctl_dir(cmd);
1310 if (_IOC_SIZE(cmd) > sizeof(data))
1313 if (dir & _IOC_WRITE)
1314 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
1320 struct ion_handle *handle;
1322 handle = ion_alloc(client, data.allocation.len,
1323 data.allocation.align,
1324 data.allocation.heap_id_mask,
1325 data.allocation.flags);
1327 return PTR_ERR(handle);
1329 data.allocation.handle = handle->id;
1331 cleanup_handle = handle;
1336 struct ion_handle *handle;
1338 mutex_lock(&client->lock);
1339 handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
1340 if (IS_ERR(handle)) {
1341 mutex_unlock(&client->lock);
1342 return PTR_ERR(handle);
1344 ion_free_nolock(client, handle);
1345 ion_handle_put_nolock(handle);
1346 mutex_unlock(&client->lock);
1352 struct ion_handle *handle;
1354 mutex_lock(&client->lock);
1355 handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
1356 if (IS_ERR(handle)) {
1357 mutex_unlock(&client->lock);
1358 return PTR_ERR(handle);
1360 data.fd.fd = ion_share_dma_buf_fd_nolock(client, handle);
1361 ion_handle_put_nolock(handle);
1362 mutex_unlock(&client->lock);
1367 case ION_IOC_IMPORT:
1369 struct ion_handle *handle;
1371 handle = ion_import_dma_buf(client, data.fd.fd);
1373 ret = PTR_ERR(handle);
1375 data.handle.handle = handle->id;
1380 ret = ion_sync_for_device(client, data.fd.fd);
1383 case ION_IOC_CUSTOM:
1385 if (!dev->custom_ioctl)
1387 ret = dev->custom_ioctl(client, data.custom.cmd,
1395 if (dir & _IOC_READ) {
1396 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1398 ion_free(client, cleanup_handle);
1405 static int ion_release(struct inode *inode, struct file *file)
1407 struct ion_client *client = file->private_data;
1409 pr_debug("%s: %d\n", __func__, __LINE__);
1410 ion_client_destroy(client);
1414 static int ion_open(struct inode *inode, struct file *file)
1416 struct miscdevice *miscdev = file->private_data;
1417 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1418 struct ion_client *client;
1419 char debug_name[64];
1421 pr_debug("%s: %d\n", __func__, __LINE__);
1422 snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1423 client = ion_client_create(dev, debug_name);
1425 return PTR_ERR(client);
1426 file->private_data = client;
1431 static const struct file_operations ion_fops = {
1432 .owner = THIS_MODULE,
1434 .release = ion_release,
1435 .unlocked_ioctl = ion_ioctl,
1436 .compat_ioctl = compat_ion_ioctl,
1439 static size_t ion_debug_heap_total(struct ion_client *client,
1445 mutex_lock(&client->lock);
1446 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1447 struct ion_handle *handle = rb_entry(n,
1450 if (handle->buffer->heap->id == id)
1451 size += handle->buffer->size;
1453 mutex_unlock(&client->lock);
1457 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1459 struct ion_heap *heap = s->private;
1460 struct ion_device *dev = heap->dev;
1462 size_t total_size = 0;
1463 size_t total_orphaned_size = 0;
1465 seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
1466 seq_puts(s, "----------------------------------------------------\n");
1468 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1469 struct ion_client *client = rb_entry(n, struct ion_client,
1471 size_t size = ion_debug_heap_total(client, heap->id);
1476 char task_comm[TASK_COMM_LEN];
1478 get_task_comm(task_comm, client->task);
1479 seq_printf(s, "%16s %16u %16zu\n", task_comm,
1482 seq_printf(s, "%16s %16u %16zu\n", client->name,
1486 seq_puts(s, "----------------------------------------------------\n");
1487 seq_puts(s, "orphaned allocations (info is from last known client):\n");
1488 mutex_lock(&dev->buffer_lock);
1489 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1490 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1492 if (buffer->heap->id != heap->id)
1494 total_size += buffer->size;
1495 if (!buffer->handle_count) {
1496 seq_printf(s, "%16s %16u %16zu %d %d\n",
1497 buffer->task_comm, buffer->pid,
1498 buffer->size, buffer->kmap_cnt,
1499 atomic_read(&buffer->ref.refcount));
1500 total_orphaned_size += buffer->size;
1503 mutex_unlock(&dev->buffer_lock);
1504 seq_puts(s, "----------------------------------------------------\n");
1505 seq_printf(s, "%16s %16zu\n", "total orphaned",
1506 total_orphaned_size);
1507 seq_printf(s, "%16s %16zu\n", "total ", total_size);
1508 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1509 seq_printf(s, "%16s %16zu\n", "deferred free",
1510 heap->free_list_size);
1511 seq_puts(s, "----------------------------------------------------\n");
1513 if (heap->debug_show)
1514 heap->debug_show(heap, s, unused);
1519 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1521 return single_open(file, ion_debug_heap_show, inode->i_private);
1524 static const struct file_operations debug_heap_fops = {
1525 .open = ion_debug_heap_open,
1527 .llseek = seq_lseek,
1528 .release = single_release,
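/*
 * Each heap that registers a shrinker also gets a "<heap>_shrink" debugfs
 * file (see ion_device_add_heap()).  Reading it reports how many objects
 * the heap's shrinker could currently free; writing N asks the shrinker to
 * scan N objects, and writing 0 asks it to scan everything it currently
 * counts.
 */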
1531 static int debug_shrink_set(void *data, u64 val)
1533 struct ion_heap *heap = data;
1534 struct shrink_control sc;
1538 sc.nr_to_scan = val;
1541 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
1542 sc.nr_to_scan = objs;
1545 heap->shrinker.scan_objects(&heap->shrinker, &sc);
1549 static int debug_shrink_get(void *data, u64 *val)
1551 struct ion_heap *heap = data;
1552 struct shrink_control sc;
1558 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
1563 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1564 debug_shrink_set, "%llu\n");
1566 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1568 struct dentry *debug_file;
1570 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1571 !heap->ops->unmap_dma)
1572 pr_err("%s: can not add heap with invalid ops struct.\n",
1575 spin_lock_init(&heap->free_lock);
1576 heap->free_list_size = 0;
1578 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1579 ion_heap_init_deferred_free(heap);
1581 if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1582 ion_heap_init_shrinker(heap);
1585 down_write(&dev->lock);
1587 * use negative heap->id to reverse the priority -- when traversing
1588 * the list later attempt higher id numbers first
1590 plist_node_init(&heap->node, -heap->id);
1591 plist_add(&heap->node, &dev->heaps);
1592 debug_file = debugfs_create_file(heap->name, 0664,
1593 dev->heaps_debug_root, heap,
1597 char buf[256], *path;
1599 path = dentry_path(dev->heaps_debug_root, buf, 256);
1600 pr_err("Failed to create heap debugfs at %s/%s\n",
1604 if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
1605 char debug_name[64];
1607 snprintf(debug_name, 64, "%s_shrink", heap->name);
1608 debug_file = debugfs_create_file(
1609 debug_name, 0644, dev->heaps_debug_root, heap,
1610 &debug_shrink_fops);
1612 char buf[256], *path;
1614 path = dentry_path(dev->heaps_debug_root, buf, 256);
1615 pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1620 up_write(&dev->lock);
1622 EXPORT_SYMBOL(ion_device_add_heap);
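/*
 * Device setup sketch (illustrative; the platform data and loop below are
 * hypothetical, only the ion_* calls come from this driver and
 * ion_heap.c):
 *
 *	idev = ion_device_create(NULL);
 *	for (i = 0; i < pdata->nr; i++) {
 *		struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);
 *
 *		ion_device_add_heap(idev, heap);
 *	}
 */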
1624 struct ion_device *ion_device_create(long (*custom_ioctl)
1625 (struct ion_client *client,
1629 struct ion_device *idev;
1632 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1634 return ERR_PTR(-ENOMEM);
1636 idev->dev.minor = MISC_DYNAMIC_MINOR;
1637 idev->dev.name = "ion";
1638 idev->dev.fops = &ion_fops;
1639 idev->dev.parent = NULL;
1640 ret = misc_register(&idev->dev);
1642 pr_err("ion: failed to register misc device.\n");
1644 return ERR_PTR(ret);
1647 idev->debug_root = debugfs_create_dir("ion", NULL);
1648 if (!idev->debug_root) {
1649 pr_err("ion: failed to create debugfs root directory.\n");
1652 idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1653 if (!idev->heaps_debug_root) {
1654 pr_err("ion: failed to create debugfs heaps directory.\n");
1657 idev->clients_debug_root = debugfs_create_dir("clients",
1659 if (!idev->clients_debug_root)
1660 pr_err("ion: failed to create debugfs clients directory.\n");
1664 idev->custom_ioctl = custom_ioctl;
1665 idev->buffers = RB_ROOT;
1666 mutex_init(&idev->buffer_lock);
1667 init_rwsem(&idev->lock);
1668 plist_head_init(&idev->heaps);
1669 idev->clients = RB_ROOT;
1672 EXPORT_SYMBOL(ion_device_create);
1674 void ion_device_destroy(struct ion_device *dev)
1676 misc_deregister(&dev->dev);
1677 debugfs_remove_recursive(dev->debug_root);
1678 /* XXX need to free the heaps and clients ? */
1681 EXPORT_SYMBOL(ion_device_destroy);
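/*
 * ion_reserve() is meant to be called from early machine/board init code,
 * while memblock is still available, to allocate or reserve the memory
 * that carveout-style heaps will use later.
 */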
1683 void __init ion_reserve(struct ion_platform_data *data)
1687 for (i = 0; i < data->nr; i++) {
1688 if (data->heaps[i].size == 0)
1691 if (data->heaps[i].base == 0) {
1694 paddr = memblock_alloc_base(data->heaps[i].size,
1695 data->heaps[i].align,
1696 MEMBLOCK_ALLOC_ANYWHERE);
1698 pr_err("%s: error allocating memblock for heap %d\n",
1702 data->heaps[i].base = paddr;
1704 int ret = memblock_reserve(data->heaps[i].base,
1705 data->heaps[i].size);
1707 pr_err("memblock reserve of %zx@%lx failed\n",
1708 data->heaps[i].size,
1709 data->heaps[i].base);
1711 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
1712 data->heaps[i].name,
1713 data->heaps[i].base,
1714 data->heaps[i].size);