1 /*
2  *
3  * drivers/staging/android/ion/ion.c
4  *
5  * Copyright (C) 2011 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 #include <linux/atomic.h>
19 #include <linux/device.h>
20 #include <linux/err.h>
21 #include <linux/file.h>
22 #include <linux/freezer.h>
23 #include <linux/fs.h>
24 #include <linux/anon_inodes.h>
25 #include <linux/kthread.h>
26 #include <linux/list.h>
27 #include <linux/memblock.h>
28 #include <linux/miscdevice.h>
29 #include <linux/export.h>
30 #include <linux/mm.h>
31 #include <linux/mm_types.h>
32 #include <linux/rbtree.h>
33 #include <linux/slab.h>
34 #include <linux/seq_file.h>
35 #include <linux/uaccess.h>
36 #include <linux/vmalloc.h>
37 #include <linux/debugfs.h>
38 #include <linux/dma-buf.h>
39 #include <linux/idr.h>
40
41 #include "ion.h"
42 #include "ion_priv.h"
43 #include "compat_ion.h"
44
45 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
46 {
47         return (buffer->flags & ION_FLAG_CACHED) &&
48                 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
49 }
50
51 bool ion_buffer_cached(struct ion_buffer *buffer)
52 {
53         return !!(buffer->flags & ION_FLAG_CACHED);
54 }
55
56 static inline struct page *ion_buffer_page(struct page *page)
57 {
58         return (struct page *)((unsigned long)page & ~(1UL));
59 }
60
61 static inline bool ion_buffer_page_is_dirty(struct page *page)
62 {
63         return !!((unsigned long)page & 1UL);
64 }
65
66 static inline void ion_buffer_page_dirty(struct page **page)
67 {
68         *page = (struct page *)((unsigned long)(*page) | 1UL);
69 }
70
71 static inline void ion_buffer_page_clean(struct page **page)
72 {
73         *page = (struct page *)((unsigned long)(*page) & ~(1UL));
74 }
75
76 /* this function should only be called while dev->lock is held */
77 static void ion_buffer_add(struct ion_device *dev,
78                            struct ion_buffer *buffer)
79 {
80         struct rb_node **p = &dev->buffers.rb_node;
81         struct rb_node *parent = NULL;
82         struct ion_buffer *entry;
83
84         while (*p) {
85                 parent = *p;
86                 entry = rb_entry(parent, struct ion_buffer, node);
87
88                 if (buffer < entry) {
89                         p = &(*p)->rb_left;
90                 } else if (buffer > entry) {
91                         p = &(*p)->rb_right;
92                 } else {
93                         pr_err("%s: buffer already found.", __func__);
94                         BUG();
95                 }
96         }
97
98         rb_link_node(&buffer->node, parent, p);
99         rb_insert_color(&buffer->node, &dev->buffers);
100 }
101
102 /* this function should only be called while dev->lock is held */
103 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
104                                             struct ion_device *dev,
105                                             unsigned long len,
106                                             unsigned long align,
107                                             unsigned long flags)
108 {
109         struct ion_buffer *buffer;
110         struct sg_table *table;
111         struct scatterlist *sg;
112         int i, ret;
113
114         buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
115         if (!buffer)
116                 return ERR_PTR(-ENOMEM);
117
118         buffer->heap = heap;
119         buffer->flags = flags;
120         kref_init(&buffer->ref);
121
122         ret = heap->ops->allocate(heap, buffer, len, align, flags);
123
124         if (ret) {
125                 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
126                         goto err2;
127
128                 ion_heap_freelist_drain(heap, 0);
129                 ret = heap->ops->allocate(heap, buffer, len, align,
130                                           flags);
131                 if (ret)
132                         goto err2;
133         }
134
135         if (buffer->sg_table == NULL) {
136                 WARN_ONCE(1, "This heap needs to set the sgtable");
137                 ret = -EINVAL;
138                 goto err1;
139         }
140
141         table = buffer->sg_table;
142         buffer->dev = dev;
143         buffer->size = len;
144
145         if (ion_buffer_fault_user_mappings(buffer)) {
146                 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
147                 struct scatterlist *sg;
148                 int i, j, k = 0;
149
150                 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
151                 if (!buffer->pages) {
152                         ret = -ENOMEM;
153                         goto err1;
154                 }
155
156                 for_each_sg(table->sgl, sg, table->nents, i) {
157                         struct page *page = sg_page(sg);
158
159                         for (j = 0; j < sg->length / PAGE_SIZE; j++)
160                                 buffer->pages[k++] = page++;
161                 }
162         }
163
164         buffer->dev = dev;
165         buffer->size = len;
166         INIT_LIST_HEAD(&buffer->vmas);
167         mutex_init(&buffer->lock);
168         /*
169          * This sets up dma addresses for the sglist -- it is not technically
170          * correct as per the dma api, since no specific device is actually
171          * taking ownership here.  However, in practice on our systems the
172          * only dma_address space is physical addresses.  Additionally, we
173          * can't afford the overhead of invalidating every allocation via
174          * dma_map_sg.  The implicit contract here is that memory coming
175          * from the heaps is ready for dma, i.e. if it has a cached mapping,
176          * that mapping has been invalidated.
177          */
178         for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
179                 sg_dma_address(sg) = sg_phys(sg);
180                 sg_dma_len(sg) = sg->length;
181         }
182         mutex_lock(&dev->buffer_lock);
183         ion_buffer_add(dev, buffer);
184         mutex_unlock(&dev->buffer_lock);
185         return buffer;
186
187 err1:
188         heap->ops->free(buffer);
189 err2:
190         kfree(buffer);
191         return ERR_PTR(ret);
192 }
193
194 void ion_buffer_destroy(struct ion_buffer *buffer)
195 {
196         if (buffer->kmap_cnt > 0) {
197                 pr_warn_once("%s: buffer still mapped in the kernel\n",
198                              __func__);
199                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
200         }
201         buffer->heap->ops->free(buffer);
202         vfree(buffer->pages);
203         kfree(buffer);
204 }
205
206 static void _ion_buffer_destroy(struct kref *kref)
207 {
208         struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
209         struct ion_heap *heap = buffer->heap;
210         struct ion_device *dev = buffer->dev;
211
212         mutex_lock(&dev->buffer_lock);
213         rb_erase(&buffer->node, &dev->buffers);
214         mutex_unlock(&dev->buffer_lock);
215
216         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
217                 ion_heap_freelist_add(heap, buffer);
218         else
219                 ion_buffer_destroy(buffer);
220 }
221
222 static void ion_buffer_get(struct ion_buffer *buffer)
223 {
224         kref_get(&buffer->ref);
225 }
226
227 static int ion_buffer_put(struct ion_buffer *buffer)
228 {
229         return kref_put(&buffer->ref, _ion_buffer_destroy);
230 }
231
232 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
233 {
234         mutex_lock(&buffer->lock);
235         buffer->handle_count++;
236         mutex_unlock(&buffer->lock);
237 }
238
239 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
240 {
241         /*
242          * When a buffer is removed from a handle, and it is not in any
243          * other handles, copy the taskcomm and the pid of the process it
244          * is being removed from into the buffer.  At this point there is
245          * no longer any way to track which processes are using this
246          * buffer; it only exists as a dma_buf file descriptor.  The
247          * taskcomm and pid can provide a debug hint as to where that fd
248          * lives in the system.
249          */
250         mutex_lock(&buffer->lock);
251         buffer->handle_count--;
252         BUG_ON(buffer->handle_count < 0);
253         if (!buffer->handle_count) {
254                 struct task_struct *task;
255
256                 task = current->group_leader;
257                 get_task_comm(buffer->task_comm, task);
258                 buffer->pid = task_pid_nr(task);
259         }
260         mutex_unlock(&buffer->lock);
261 }
262
263 static struct ion_handle *ion_handle_create(struct ion_client *client,
264                                             struct ion_buffer *buffer)
265 {
266         struct ion_handle *handle;
267
268         handle = kzalloc(sizeof(*handle), GFP_KERNEL);
269         if (!handle)
270                 return ERR_PTR(-ENOMEM);
271         kref_init(&handle->ref);
272         RB_CLEAR_NODE(&handle->node);
273         handle->client = client;
274         ion_buffer_get(buffer);
275         ion_buffer_add_to_handle(buffer);
276         handle->buffer = buffer;
277
278         return handle;
279 }
280
281 static void ion_handle_kmap_put(struct ion_handle *);
282
283 static void ion_handle_destroy(struct kref *kref)
284 {
285         struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
286         struct ion_client *client = handle->client;
287         struct ion_buffer *buffer = handle->buffer;
288
289         mutex_lock(&buffer->lock);
290         while (handle->kmap_cnt)
291                 ion_handle_kmap_put(handle);
292         mutex_unlock(&buffer->lock);
293
294         idr_remove(&client->idr, handle->id);
295         if (!RB_EMPTY_NODE(&handle->node))
296                 rb_erase(&handle->node, &client->handles);
297
298         ion_buffer_remove_from_handle(buffer);
299         ion_buffer_put(buffer);
300
301         kfree(handle);
302 }
303
304 static void ion_handle_get(struct ion_handle *handle)
305 {
306         kref_get(&handle->ref);
307 }
308
309 /* Must hold the client lock */
310 static struct ion_handle *ion_handle_get_check_overflow(
311                                         struct ion_handle *handle)
312 {
313         if (atomic_read(&handle->ref.refcount) + 1 == 0)
314                 return ERR_PTR(-EOVERFLOW);
315         ion_handle_get(handle);
316         return handle;
317 }
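/*
 * Editor's note: the "+ 1 == 0" test above refuses to take another reference
 * when the handle's kref is about to wrap around.  Without it, repeatedly
 * importing the same buffer could overflow the refcount and lead to a
 * premature free; callers get -EOVERFLOW instead.
 */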
318
319 int ion_handle_put_nolock(struct ion_handle *handle)
320 {
321         return kref_put(&handle->ref, ion_handle_destroy);
322 }
323
324 int ion_handle_put(struct ion_handle *handle)
325 {
326         struct ion_client *client = handle->client;
327         int ret;
328
329         mutex_lock(&client->lock);
330         ret = ion_handle_put_nolock(handle);
331         mutex_unlock(&client->lock);
332
333         return ret;
334 }
335
336 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
337                                             struct ion_buffer *buffer)
338 {
339         struct rb_node *n = client->handles.rb_node;
340
341         while (n) {
342                 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
343
344                 if (buffer < entry->buffer)
345                         n = n->rb_left;
346                 else if (buffer > entry->buffer)
347                         n = n->rb_right;
348                 else
349                         return entry;
350         }
351         return ERR_PTR(-EINVAL);
352 }
353
354 struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
355                                                int id)
356 {
357         struct ion_handle *handle;
358
359         handle = idr_find(&client->idr, id);
360         if (handle)
361                 return ion_handle_get_check_overflow(handle);
362
363         return ERR_PTR(-EINVAL);
364 }
365
366 bool ion_handle_validate(struct ion_client *client,
367                          struct ion_handle *handle)
368 {
369         WARN_ON(!mutex_is_locked(&client->lock));
370         return idr_find(&client->idr, handle->id) == handle;
371 }
372
373 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
374 {
375         int id;
376         struct rb_node **p = &client->handles.rb_node;
377         struct rb_node *parent = NULL;
378         struct ion_handle *entry;
379
380         id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
381         if (id < 0)
382                 return id;
383
384         handle->id = id;
385
386         while (*p) {
387                 parent = *p;
388                 entry = rb_entry(parent, struct ion_handle, node);
389
390                 if (handle->buffer < entry->buffer)
391                         p = &(*p)->rb_left;
392                 else if (handle->buffer > entry->buffer)
393                         p = &(*p)->rb_right;
394                 else
395                         WARN(1, "%s: buffer already found.", __func__);
396         }
397
398         rb_link_node(&handle->node, parent, p);
399         rb_insert_color(&handle->node, &client->handles);
400
401         return 0;
402 }
403
404 struct ion_handle *__ion_alloc(struct ion_client *client, size_t len,
405                                size_t align, unsigned int heap_id_mask,
406                                unsigned int flags, bool grab_handle)
407 {
408         struct ion_handle *handle;
409         struct ion_device *dev = client->dev;
410         struct ion_buffer *buffer = NULL;
411         struct ion_heap *heap;
412         int ret;
413
414         pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
415                  len, align, heap_id_mask, flags);
416         /*
417          * Traverse the list of heaps available in this system in priority
418          * order.  If the heap type is supported by the client and matches
419          * the caller's request, allocate from it.  Repeat until an
420          * allocation succeeds or all heaps have been tried.
421          */
422         len = PAGE_ALIGN(len);
423
424         if (!len)
425                 return ERR_PTR(-EINVAL);
426
427         down_read(&dev->lock);
428         plist_for_each_entry(heap, &dev->heaps, node) {
429                 /* if the caller didn't specify this heap id */
430                 if (!((1 << heap->id) & heap_id_mask))
431                         continue;
432                 buffer = ion_buffer_create(heap, dev, len, align, flags);
433                 if (!IS_ERR(buffer))
434                         break;
435         }
436         up_read(&dev->lock);
437
438         if (buffer == NULL)
439                 return ERR_PTR(-ENODEV);
440
441         if (IS_ERR(buffer))
442                 return ERR_CAST(buffer);
443
444         handle = ion_handle_create(client, buffer);
445
446         /*
447          * ion_buffer_create will create a buffer with a ref_cnt of 1,
448          * and ion_handle_create will take a second reference, drop one here
449          */
450         ion_buffer_put(buffer);
451
452         if (IS_ERR(handle))
453                 return handle;
454
455         mutex_lock(&client->lock);
456         if (grab_handle)
457                 ion_handle_get(handle);
458         ret = ion_handle_add(client, handle);
459         mutex_unlock(&client->lock);
460         if (ret) {
461                 ion_handle_put(handle);
462                 handle = ERR_PTR(ret);
463         }
464
465         return handle;
466 }
467
468 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
469                              size_t align, unsigned int heap_id_mask,
470                              unsigned int flags)
471 {
472         return __ion_alloc(client, len, align, heap_id_mask, flags, false);
473 }
474 EXPORT_SYMBOL(ion_alloc);
475
476 void ion_free_nolock(struct ion_client *client,
477                      struct ion_handle *handle)
478 {
479         if (!ion_handle_validate(client, handle)) {
480                 WARN(1, "%s: invalid handle passed to free.\n", __func__);
481                 return;
482         }
483         ion_handle_put_nolock(handle);
484 }
485
486 void ion_free(struct ion_client *client, struct ion_handle *handle)
487 {
488         BUG_ON(client != handle->client);
489
490         mutex_lock(&client->lock);
491         ion_free_nolock(client, handle);
492         mutex_unlock(&client->lock);
493 }
494 EXPORT_SYMBOL(ion_free);
495
496 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
497 {
498         void *vaddr;
499
500         if (buffer->kmap_cnt) {
501                 if (buffer->kmap_cnt == INT_MAX)
502                         return ERR_PTR(-EOVERFLOW);
503
504                 buffer->kmap_cnt++;
505                 return buffer->vaddr;
506         }
507         vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
508         if (WARN_ONCE(vaddr == NULL,
509                       "heap->ops->map_kernel should return ERR_PTR on error"))
510                 return ERR_PTR(-EINVAL);
511         if (IS_ERR(vaddr))
512                 return vaddr;
513         buffer->vaddr = vaddr;
514         buffer->kmap_cnt++;
515         return vaddr;
516 }
517
518 static void *ion_handle_kmap_get(struct ion_handle *handle)
519 {
520         struct ion_buffer *buffer = handle->buffer;
521         void *vaddr;
522
523         if (handle->kmap_cnt) {
524                 if (handle->kmap_cnt == INT_MAX)
525                         return ERR_PTR(-EOVERFLOW);
526
527                 handle->kmap_cnt++;
528                 return buffer->vaddr;
529         }
530         vaddr = ion_buffer_kmap_get(buffer);
531         if (IS_ERR(vaddr))
532                 return vaddr;
533         handle->kmap_cnt++;
534         return vaddr;
535 }
536
537 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
538 {
539         buffer->kmap_cnt--;
540         if (!buffer->kmap_cnt) {
541                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
542                 buffer->vaddr = NULL;
543         }
544 }
545
546 static void ion_handle_kmap_put(struct ion_handle *handle)
547 {
548         struct ion_buffer *buffer = handle->buffer;
549
550         if (!handle->kmap_cnt) {
551                 WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
552                 return;
553         }
554         handle->kmap_cnt--;
555         if (!handle->kmap_cnt)
556                 ion_buffer_kmap_put(buffer);
557 }
558
559 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
560 {
561         struct ion_buffer *buffer;
562         void *vaddr;
563
564         mutex_lock(&client->lock);
565         if (!ion_handle_validate(client, handle)) {
566                 pr_err("%s: invalid handle passed to map_kernel.\n",
567                        __func__);
568                 mutex_unlock(&client->lock);
569                 return ERR_PTR(-EINVAL);
570         }
571
572         buffer = handle->buffer;
573
574         if (!handle->buffer->heap->ops->map_kernel) {
575                 pr_err("%s: map_kernel is not implemented by this heap.\n",
576                        __func__);
577                 mutex_unlock(&client->lock);
578                 return ERR_PTR(-ENODEV);
579         }
580
581         mutex_lock(&buffer->lock);
582         vaddr = ion_handle_kmap_get(handle);
583         mutex_unlock(&buffer->lock);
584         mutex_unlock(&client->lock);
585         return vaddr;
586 }
587 EXPORT_SYMBOL(ion_map_kernel);
588
589 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
590 {
591         struct ion_buffer *buffer;
592
593         mutex_lock(&client->lock);
594         buffer = handle->buffer;
595         mutex_lock(&buffer->lock);
596         ion_handle_kmap_put(handle);
597         mutex_unlock(&buffer->lock);
598         mutex_unlock(&client->lock);
599 }
600 EXPORT_SYMBOL(ion_unmap_kernel);
601
602 static struct mutex debugfs_mutex;
603 static struct rb_root *ion_root_client;
604 static int is_client_alive(struct ion_client *client)
605 {
606         struct rb_node *node;
607         struct ion_client *tmp;
608         struct ion_device *dev;
609
610         node = ion_root_client->rb_node;
611         dev = container_of(ion_root_client, struct ion_device, clients);
612
613         down_read(&dev->lock);
614         while (node) {
615                 tmp = rb_entry(node, struct ion_client, node);
616                 if (client < tmp) {
617                         node = node->rb_left;
618                 } else if (client > tmp) {
619                         node = node->rb_right;
620                 } else {
621                         up_read(&dev->lock);
622                         return 1;
623                 }
624         }
625
626         up_read(&dev->lock);
627         return 0;
628 }
629
630 static int ion_debug_client_show(struct seq_file *s, void *unused)
631 {
632         struct ion_client *client = s->private;
633         struct rb_node *n;
634         size_t sizes[ION_NUM_HEAP_IDS] = {0};
635         const char *names[ION_NUM_HEAP_IDS] = {NULL};
636         int i;
637
638         mutex_lock(&debugfs_mutex);
639         if (!is_client_alive(client)) {
640                 seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n",
641                            client);
642                 mutex_unlock(&debugfs_mutex);
643                 return 0;
644         }
645
646         mutex_lock(&client->lock);
647         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
648                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
649                                                      node);
650                 unsigned int id = handle->buffer->heap->id;
651
652                 if (!names[id])
653                         names[id] = handle->buffer->heap->name;
654                 sizes[id] += handle->buffer->size;
655         }
656         mutex_unlock(&client->lock);
657         mutex_unlock(&debugfs_mutex);
658
659         seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
660         for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
661                 if (!names[i])
662                         continue;
663                 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
664         }
665         return 0;
666 }
667
668 static int ion_debug_client_open(struct inode *inode, struct file *file)
669 {
670         return single_open(file, ion_debug_client_show, inode->i_private);
671 }
672
673 static const struct file_operations debug_client_fops = {
674         .open = ion_debug_client_open,
675         .read = seq_read,
676         .llseek = seq_lseek,
677         .release = single_release,
678 };
679
680 static int ion_get_client_serial(const struct rb_root *root,
681                                  const unsigned char *name)
682 {
683         int serial = -1;
684         struct rb_node *node;
685
686         for (node = rb_first(root); node; node = rb_next(node)) {
687                 struct ion_client *client = rb_entry(node, struct ion_client,
688                                                      node);
689
690                 if (strcmp(client->name, name))
691                         continue;
692                 serial = max(serial, client->display_serial);
693         }
694         return serial + 1;
695 }
696
697 struct ion_client *ion_client_create(struct ion_device *dev,
698                                      const char *name)
699 {
700         struct ion_client *client;
701         struct task_struct *task;
702         struct rb_node **p;
703         struct rb_node *parent = NULL;
704         struct ion_client *entry;
705         pid_t pid;
706
707         if (!name) {
708                 pr_err("%s: Name cannot be null\n", __func__);
709                 return ERR_PTR(-EINVAL);
710         }
711
712         get_task_struct(current->group_leader);
713         task_lock(current->group_leader);
714         pid = task_pid_nr(current->group_leader);
715         /*
716          * don't bother to store task struct for kernel threads,
717          * they can't be killed anyway
718          */
719         if (current->group_leader->flags & PF_KTHREAD) {
720                 put_task_struct(current->group_leader);
721                 task = NULL;
722         } else {
723                 task = current->group_leader;
724         }
725         task_unlock(current->group_leader);
726
727         client = kzalloc(sizeof(*client), GFP_KERNEL);
728         if (!client)
729                 goto err_put_task_struct;
730
731         client->dev = dev;
732         client->handles = RB_ROOT;
733         idr_init(&client->idr);
734         mutex_init(&client->lock);
735         client->task = task;
736         client->pid = pid;
737         client->name = kstrdup(name, GFP_KERNEL);
738         if (!client->name)
739                 goto err_free_client;
740
741         down_write(&dev->lock);
742         client->display_serial = ion_get_client_serial(&dev->clients, name);
743         client->display_name = kasprintf(
744                 GFP_KERNEL, "%s-%d", name, client->display_serial);
745         if (!client->display_name) {
746                 up_write(&dev->lock);
747                 goto err_free_client_name;
748         }
749         p = &dev->clients.rb_node;
750         while (*p) {
751                 parent = *p;
752                 entry = rb_entry(parent, struct ion_client, node);
753
754                 if (client < entry)
755                         p = &(*p)->rb_left;
756                 else if (client > entry)
757                         p = &(*p)->rb_right;
758         }
759         rb_link_node(&client->node, parent, p);
760         rb_insert_color(&client->node, &dev->clients);
761
762         client->debug_root = debugfs_create_file(client->display_name, 0664,
763                                                  dev->clients_debug_root,
764                                                  client, &debug_client_fops);
765         if (!client->debug_root) {
766                 char buf[256], *path;
767
768                 path = dentry_path(dev->clients_debug_root, buf, 256);
769                 pr_err("Failed to create client debugfs at %s/%s\n",
770                        path, client->display_name);
771         }
772
773         up_write(&dev->lock);
774
775         return client;
776
777 err_free_client_name:
778         kfree(client->name);
779 err_free_client:
780         kfree(client);
781 err_put_task_struct:
782         if (task)
783                 put_task_struct(current->group_leader);
784         return ERR_PTR(-ENOMEM);
785 }
786 EXPORT_SYMBOL(ion_client_create);
787
788 void ion_client_destroy(struct ion_client *client)
789 {
790         struct ion_device *dev = client->dev;
791         struct rb_node *n;
792
793         pr_debug("%s: %d\n", __func__, __LINE__);
794         mutex_lock(&debugfs_mutex);
795         while ((n = rb_first(&client->handles))) {
796                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
797                                                      node);
798                 ion_handle_destroy(&handle->ref);
799         }
800
801         idr_destroy(&client->idr);
802
803         down_write(&dev->lock);
804         if (client->task)
805                 put_task_struct(client->task);
806         rb_erase(&client->node, &dev->clients);
807         debugfs_remove_recursive(client->debug_root);
808         up_write(&dev->lock);
809
810         kfree(client->display_name);
811         kfree(client->name);
812         kfree(client);
813         mutex_unlock(&debugfs_mutex);
814 }
815 EXPORT_SYMBOL(ion_client_destroy);
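/*
 * Editor's sketch: how an in-kernel user is expected to drive the client API
 * exported above.  Assumes a struct ion_device *idev obtained from
 * ion_device_create() (defined later in this file) and the heap mask macro
 * ION_HEAP_SYSTEM_MASK from the ION uapi header; error handling is
 * abbreviated and illustrative only:
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *	void *vaddr;
 *
 *	client = ion_client_create(idev, "my-driver");
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *
 *	handle = ion_alloc(client, PAGE_SIZE, 0, ION_HEAP_SYSTEM_MASK,
 *			   ION_FLAG_CACHED);
 *	if (IS_ERR(handle))
 *		goto out_client;
 *
 *	vaddr = ion_map_kernel(client, handle);	// CPU access to the buffer
 *	...
 *	ion_unmap_kernel(client, handle);
 *	ion_free(client, handle);
 * out_client:
 *	ion_client_destroy(client);
 */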
816
817 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
818                                        struct device *dev,
819                                        enum dma_data_direction direction);
820
821 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
822                                         enum dma_data_direction direction)
823 {
824         struct dma_buf *dmabuf = attachment->dmabuf;
825         struct ion_buffer *buffer = dmabuf->priv;
826
827         ion_buffer_sync_for_device(buffer, attachment->dev, direction);
828         return buffer->sg_table;
829 }
830
831 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
832                               struct sg_table *table,
833                               enum dma_data_direction direction)
834 {
835 }
836
837 void ion_pages_sync_for_device(struct device *dev, struct page *page,
838                                size_t size, enum dma_data_direction dir)
839 {
840         struct scatterlist sg;
841
842         sg_init_table(&sg, 1);
843         sg_set_page(&sg, page, size, 0);
844         /*
845          * This is not correct - sg_dma_address needs a dma_addr_t that is valid
846          * for the targeted device, but this works on the currently targeted
847          * hardware.
848          */
849         sg_dma_address(&sg) = page_to_phys(page);
850         dma_sync_sg_for_device(dev, &sg, 1, dir);
851 }
852
853 struct ion_vma_list {
854         struct list_head list;
855         struct vm_area_struct *vma;
856 };
857
858 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
859                                        struct device *dev,
860                                        enum dma_data_direction dir)
861 {
862         struct ion_vma_list *vma_list;
863         int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
864         int i;
865
866         pr_debug("%s: syncing for device %s\n", __func__,
867                  dev ? dev_name(dev) : "null");
868
869         if (!ion_buffer_fault_user_mappings(buffer))
870                 return;
871
872         mutex_lock(&buffer->lock);
873         for (i = 0; i < pages; i++) {
874                 struct page *page = buffer->pages[i];
875
876                 if (ion_buffer_page_is_dirty(page))
877                         ion_pages_sync_for_device(dev, ion_buffer_page(page),
878                                                   PAGE_SIZE, dir);
879
880                 ion_buffer_page_clean(buffer->pages + i);
881         }
882         list_for_each_entry(vma_list, &buffer->vmas, list) {
883                 struct vm_area_struct *vma = vma_list->vma;
884
885                 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
886                                NULL);
887         }
888         mutex_unlock(&buffer->lock);
889 }
890
891 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
892 {
893         struct ion_buffer *buffer = vma->vm_private_data;
894         unsigned long pfn;
895         int ret;
896
897         mutex_lock(&buffer->lock);
898         ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
899         BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
900
901         pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
902         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
903         mutex_unlock(&buffer->lock);
904         if (ret)
905                 return VM_FAULT_ERROR;
906
907         return VM_FAULT_NOPAGE;
908 }
909
910 static void ion_vm_open(struct vm_area_struct *vma)
911 {
912         struct ion_buffer *buffer = vma->vm_private_data;
913         struct ion_vma_list *vma_list;
914
915         vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
916         if (!vma_list)
917                 return;
918         vma_list->vma = vma;
919         mutex_lock(&buffer->lock);
920         list_add(&vma_list->list, &buffer->vmas);
921         mutex_unlock(&buffer->lock);
922         pr_debug("%s: adding %p\n", __func__, vma);
923 }
924
925 static void ion_vm_close(struct vm_area_struct *vma)
926 {
927         struct ion_buffer *buffer = vma->vm_private_data;
928         struct ion_vma_list *vma_list, *tmp;
929
930         pr_debug("%s\n", __func__);
931         mutex_lock(&buffer->lock);
932         list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
933                 if (vma_list->vma != vma)
934                         continue;
935                 list_del(&vma_list->list);
936                 kfree(vma_list);
937                 pr_debug("%s: deleting %p\n", __func__, vma);
938                 break;
939         }
940         mutex_unlock(&buffer->lock);
941 }
942
943 static const struct vm_operations_struct ion_vma_ops = {
944         .open = ion_vm_open,
945         .close = ion_vm_close,
946         .fault = ion_vm_fault,
947 };
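/*
 * Editor's note: these vm_ops are only installed for buffers where
 * ion_buffer_fault_user_mappings() is true (cached, without
 * ION_FLAG_CACHED_NEEDS_SYNC).  Pages are then faulted into userspace one at
 * a time and tagged dirty, so ion_buffer_sync_for_device() above only has to
 * do cache maintenance on the pages that were actually touched; it then zaps
 * the userspace mappings so the next access faults (and re-dirties) again.
 */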
948
949 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
950 {
951         struct ion_buffer *buffer = dmabuf->priv;
952         int ret = 0;
953
954         if (!buffer->heap->ops->map_user) {
955                 pr_err("%s: this heap does not define a method for mapping to userspace\n",
956                        __func__);
957                 return -EINVAL;
958         }
959
960         if (ion_buffer_fault_user_mappings(buffer)) {
961                 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
962                                                         VM_DONTDUMP;
963                 vma->vm_private_data = buffer;
964                 vma->vm_ops = &ion_vma_ops;
965                 ion_vm_open(vma);
966                 return 0;
967         }
968
969         if (!(buffer->flags & ION_FLAG_CACHED))
970                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
971
972         mutex_lock(&buffer->lock);
973         /* now map it to userspace */
974         ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
975         mutex_unlock(&buffer->lock);
976
977         if (ret)
978                 pr_err("%s: failure mapping buffer to userspace\n",
979                        __func__);
980
981         return ret;
982 }
983
984 static void ion_dma_buf_release(struct dma_buf *dmabuf)
985 {
986         struct ion_buffer *buffer = dmabuf->priv;
987
988         ion_buffer_put(buffer);
989 }
990
991 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
992 {
993         struct ion_buffer *buffer = dmabuf->priv;
994
995         return buffer->vaddr + offset * PAGE_SIZE;
996 }
997
998 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
999                                void *ptr)
1000 {
1001 }
1002
1003 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1004                                         enum dma_data_direction direction)
1005 {
1006         struct ion_buffer *buffer = dmabuf->priv;
1007         void *vaddr;
1008
1009         if (!buffer->heap->ops->map_kernel) {
1010                 pr_err("%s: map kernel is not implemented by this heap.\n",
1011                        __func__);
1012                 return -ENODEV;
1013         }
1014
1015         mutex_lock(&buffer->lock);
1016         vaddr = ion_buffer_kmap_get(buffer);
1017         mutex_unlock(&buffer->lock);
1018         return PTR_ERR_OR_ZERO(vaddr);
1019 }
1020
1021 static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1022                                       enum dma_data_direction direction)
1023 {
1024         struct ion_buffer *buffer = dmabuf->priv;
1025
1026         mutex_lock(&buffer->lock);
1027         ion_buffer_kmap_put(buffer);
1028         mutex_unlock(&buffer->lock);
1029
1030         return 0;
1031 }
1032
1033 static struct dma_buf_ops dma_buf_ops = {
1034         .map_dma_buf = ion_map_dma_buf,
1035         .unmap_dma_buf = ion_unmap_dma_buf,
1036         .mmap = ion_mmap,
1037         .release = ion_dma_buf_release,
1038         .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1039         .end_cpu_access = ion_dma_buf_end_cpu_access,
1040         .kmap_atomic = ion_dma_buf_kmap,
1041         .kunmap_atomic = ion_dma_buf_kunmap,
1042         .kmap = ion_dma_buf_kmap,
1043         .kunmap = ion_dma_buf_kunmap,
1044 };
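/*
 * Editor's note: every buffer exported through __ion_share_dma_buf() below is
 * backed by this ops table.  ion_import_dma_buf() and ion_sync_for_device()
 * rely on that: they compare dmabuf->ops against &dma_buf_ops to decide
 * whether a dma-buf was exported by ION and therefore carries an ion_buffer
 * in its priv field.
 */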
1045
1046 static struct dma_buf *__ion_share_dma_buf(struct ion_client *client,
1047                                            struct ion_handle *handle,
1048                                            bool lock_client)
1049 {
1050         DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
1051         struct ion_buffer *buffer;
1052         struct dma_buf *dmabuf;
1053         bool valid_handle;
1054
1055         if (lock_client)
1056                 mutex_lock(&client->lock);
1057         valid_handle = ion_handle_validate(client, handle);
1058         if (!valid_handle) {
1059                 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1060                 if (lock_client)
1061                         mutex_unlock(&client->lock);
1062                 return ERR_PTR(-EINVAL);
1063         }
1064         buffer = handle->buffer;
1065         ion_buffer_get(buffer);
1066         if (lock_client)
1067                 mutex_unlock(&client->lock);
1068
1069         exp_info.ops = &dma_buf_ops;
1070         exp_info.size = buffer->size;
1071         exp_info.flags = O_RDWR;
1072         exp_info.priv = buffer;
1073
1074         dmabuf = dma_buf_export(&exp_info);
1075         if (IS_ERR(dmabuf)) {
1076                 ion_buffer_put(buffer);
1077                 return dmabuf;
1078         }
1079
1080         return dmabuf;
1081 }
1082
1083 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1084                                   struct ion_handle *handle)
1085 {
1086         return __ion_share_dma_buf(client, handle, true);
1087 }
1088 EXPORT_SYMBOL(ion_share_dma_buf);
1089
1090 static int __ion_share_dma_buf_fd(struct ion_client *client,
1091                                   struct ion_handle *handle, bool lock_client)
1092 {
1093         struct dma_buf *dmabuf;
1094         int fd;
1095
1096         dmabuf = __ion_share_dma_buf(client, handle, lock_client);
1097         if (IS_ERR(dmabuf))
1098                 return PTR_ERR(dmabuf);
1099
1100         fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1101         if (fd < 0)
1102                 dma_buf_put(dmabuf);
1103
1104         return fd;
1105 }
1106
1107 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1108 {
1109         return __ion_share_dma_buf_fd(client, handle, true);
1110 }
1111 EXPORT_SYMBOL(ion_share_dma_buf_fd);
1112
1113 int ion_share_dma_buf_fd_nolock(struct ion_client *client,
1114                                 struct ion_handle *handle)
1115 {
1116         return __ion_share_dma_buf_fd(client, handle, false);
1117 }
1118
1119 struct ion_handle *ion_import_dma_buf(struct ion_client *client,
1120                                       struct dma_buf *dmabuf)
1121 {
1122         struct ion_buffer *buffer;
1123         struct ion_handle *handle;
1124         int ret;
1125
1126         /* if this memory came from ion */
1127
1128         if (dmabuf->ops != &dma_buf_ops) {
1129                 pr_err("%s: can not import dmabuf from another exporter\n",
1130                        __func__);
1131                 return ERR_PTR(-EINVAL);
1132         }
1133         buffer = dmabuf->priv;
1134
1135         mutex_lock(&client->lock);
1136         /* if a handle exists for this buffer just take a reference to it */
1137         handle = ion_handle_lookup(client, buffer);
1138         if (!IS_ERR(handle)) {
1139                 handle = ion_handle_get_check_overflow(handle);
1140                 mutex_unlock(&client->lock);
1141                 goto end;
1142         }
1143
1144         handle = ion_handle_create(client, buffer);
1145         if (IS_ERR(handle)) {
1146                 mutex_unlock(&client->lock);
1147                 goto end;
1148         }
1149
1150         ret = ion_handle_add(client, handle);
1151         mutex_unlock(&client->lock);
1152         if (ret) {
1153                 ion_handle_put(handle);
1154                 handle = ERR_PTR(ret);
1155         }
1156
1157 end:
1158         return handle;
1159 }
1160 EXPORT_SYMBOL(ion_import_dma_buf);
1161
1162 struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
1163 {
1164         struct dma_buf *dmabuf;
1165         struct ion_handle *handle;
1166
1167         dmabuf = dma_buf_get(fd);
1168         if (IS_ERR(dmabuf))
1169                 return ERR_CAST(dmabuf);
1170
1171         handle = ion_import_dma_buf(client, dmabuf);
1172         dma_buf_put(dmabuf);
1173         return handle;
1174 }
1175 EXPORT_SYMBOL(ion_import_dma_buf_fd);
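/*
 * Editor's sketch: buffers travel between clients (and between processes) as
 * dma-buf file descriptors.  Exporting and re-importing inside the kernel
 * looks roughly like this; "producer" and "consumer" are hypothetical
 * clients and error handling is abbreviated:
 *
 *	int fd;
 *	struct ion_handle *imported;
 *
 *	fd = ion_share_dma_buf_fd(producer, handle);	// take an fd reference
 *	if (fd < 0)
 *		return fd;
 *
 *	imported = ion_import_dma_buf_fd(consumer, fd);	// fd -> handle
 *	if (IS_ERR(imported))
 *		return PTR_ERR(imported);
 *
 * Because ion_import_dma_buf() recognises its own dma_buf_ops, the consumer
 * ends up with a handle to the very same ion_buffer rather than a copy.
 */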
1176
1177 int ion_sync_for_device(struct ion_client *client, int fd)
1178 {
1179         struct dma_buf *dmabuf;
1180         struct ion_buffer *buffer;
1181
1182         dmabuf = dma_buf_get(fd);
1183         if (IS_ERR(dmabuf))
1184                 return PTR_ERR(dmabuf);
1185
1186         /* if this memory came from ion */
1187         if (dmabuf->ops != &dma_buf_ops) {
1188                 pr_err("%s: can not sync dmabuf from another exporter\n",
1189                        __func__);
1190                 dma_buf_put(dmabuf);
1191                 return -EINVAL;
1192         }
1193         buffer = dmabuf->priv;
1194
1195         dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1196                                buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1197         dma_buf_put(dmabuf);
1198         return 0;
1199 }
1200
1201 int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
1202 {
1203         struct ion_device *dev = client->dev;
1204         struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
1205         int ret = -EINVAL, cnt = 0, max_cnt;
1206         struct ion_heap *heap;
1207         struct ion_heap_data hdata;
1208
1209         memset(&hdata, 0, sizeof(hdata));
1210
1211         down_read(&dev->lock);
1212         if (!buffer) {
1213                 query->cnt = dev->heap_cnt;
1214                 ret = 0;
1215                 goto out;
1216         }
1217
1218         if (query->cnt <= 0)
1219                 goto out;
1220
1221         max_cnt = query->cnt;
1222
1223         plist_for_each_entry(heap, &dev->heaps, node) {
1224                 strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
1225                 hdata.name[sizeof(hdata.name) - 1] = '\0';
1226                 hdata.type = heap->type;
1227                 hdata.heap_id = heap->id;
1228
1229                 if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
1230                         ret = -EFAULT;
1231                         goto out;
1232                 }
1233
1234                 cnt++;
1235                 if (cnt >= max_cnt)
1236                         break;
1237         }
1238
1239         query->cnt = cnt;
1240 out:
1241         up_read(&dev->lock);
1242         return ret;
1243 }
1244
1245 static int ion_release(struct inode *inode, struct file *file)
1246 {
1247         struct ion_client *client = file->private_data;
1248
1249         pr_debug("%s: %d\n", __func__, __LINE__);
1250         ion_client_destroy(client);
1251         return 0;
1252 }
1253
1254 static int ion_open(struct inode *inode, struct file *file)
1255 {
1256         struct miscdevice *miscdev = file->private_data;
1257         struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1258         struct ion_client *client;
1259         char debug_name[64];
1260
1261         pr_debug("%s: %d\n", __func__, __LINE__);
1262         snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1263         client = ion_client_create(dev, debug_name);
1264         if (IS_ERR(client))
1265                 return PTR_ERR(client);
1266         file->private_data = client;
1267
1268         return 0;
1269 }
1270
1271 static const struct file_operations ion_fops = {
1272         .owner          = THIS_MODULE,
1273         .open           = ion_open,
1274         .release        = ion_release,
1275         .unlocked_ioctl = ion_ioctl,
1276         .compat_ioctl   = compat_ion_ioctl,
1277 };
1278
1279 static size_t ion_debug_heap_total(struct ion_client *client,
1280                                    unsigned int id)
1281 {
1282         size_t size = 0;
1283         struct rb_node *n;
1284
1285         mutex_lock(&client->lock);
1286         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1287                 struct ion_handle *handle = rb_entry(n,
1288                                                      struct ion_handle,
1289                                                      node);
1290                 if (handle->buffer->heap->id == id)
1291                         size += handle->buffer->size;
1292         }
1293         mutex_unlock(&client->lock);
1294         return size;
1295 }
1296
1297 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1298 {
1299         struct ion_heap *heap = s->private;
1300         struct ion_device *dev = heap->dev;
1301         struct rb_node *n;
1302         size_t total_size = 0;
1303         size_t total_orphaned_size = 0;
1304
1305         seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
1306         seq_puts(s, "----------------------------------------------------\n");
1307
1308         mutex_lock(&debugfs_mutex);
1309         for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1310                 struct ion_client *client = rb_entry(n, struct ion_client,
1311                                                      node);
1312                 size_t size = ion_debug_heap_total(client, heap->id);
1313
1314                 if (!size)
1315                         continue;
1316                 if (client->task) {
1317                         char task_comm[TASK_COMM_LEN];
1318
1319                         get_task_comm(task_comm, client->task);
1320                         seq_printf(s, "%16s %16u %16zu\n", task_comm,
1321                                    client->pid, size);
1322                 } else {
1323                         seq_printf(s, "%16s %16u %16zu\n", client->name,
1324                                    client->pid, size);
1325                 }
1326         }
1327         mutex_unlock(&debugfs_mutex);
1328
1329         seq_puts(s, "----------------------------------------------------\n");
1330         seq_puts(s, "orphaned allocations (info is from last known client):\n");
1331         mutex_lock(&dev->buffer_lock);
1332         for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1333                 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1334                                                      node);
1335                 if (buffer->heap->id != heap->id)
1336                         continue;
1337                 total_size += buffer->size;
1338                 if (!buffer->handle_count) {
1339                         seq_printf(s, "%16s %16u %16zu %d %d\n",
1340                                    buffer->task_comm, buffer->pid,
1341                                    buffer->size, buffer->kmap_cnt,
1342                                    atomic_read(&buffer->ref.refcount));
1343                         total_orphaned_size += buffer->size;
1344                 }
1345         }
1346         mutex_unlock(&dev->buffer_lock);
1347         seq_puts(s, "----------------------------------------------------\n");
1348         seq_printf(s, "%16s %16zu\n", "total orphaned",
1349                    total_orphaned_size);
1350         seq_printf(s, "%16s %16zu\n", "total ", total_size);
1351         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1352                 seq_printf(s, "%16s %16zu\n", "deferred free",
1353                            heap->free_list_size);
1354         seq_puts(s, "----------------------------------------------------\n");
1355
1356         if (heap->debug_show)
1357                 heap->debug_show(heap, s, unused);
1358
1359         return 0;
1360 }
1361
1362 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1363 {
1364         return single_open(file, ion_debug_heap_show, inode->i_private);
1365 }
1366
1367 static const struct file_operations debug_heap_fops = {
1368         .open = ion_debug_heap_open,
1369         .read = seq_read,
1370         .llseek = seq_lseek,
1371         .release = single_release,
1372 };
1373
1374 static int debug_shrink_set(void *data, u64 val)
1375 {
1376         struct ion_heap *heap = data;
1377         struct shrink_control sc;
1378         int objs;
1379
1380         sc.gfp_mask = GFP_HIGHUSER;
1381         sc.nr_to_scan = val;
1382
1383         if (!val) {
1384                 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
1385                 sc.nr_to_scan = objs;
1386         }
1387
1388         heap->shrinker.scan_objects(&heap->shrinker, &sc);
1389         return 0;
1390 }
1391
1392 static int debug_shrink_get(void *data, u64 *val)
1393 {
1394         struct ion_heap *heap = data;
1395         struct shrink_control sc;
1396         int objs;
1397
1398         sc.gfp_mask = GFP_HIGHUSER;
1399         sc.nr_to_scan = 0;
1400
1401         objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
1402         *val = objs;
1403         return 0;
1404 }
1405
1406 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1407                         debug_shrink_set, "%llu\n");
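/*
 * Editor's note on the resulting debugfs interface: for each heap with a
 * shrinker, ion_device_add_heap() below creates
 * <debugfs>/ion/heaps/<heapname>_shrink (debugfs is typically mounted at
 * /sys/kernel/debug).  Reading the file reports how many objects the heap's
 * shrinker currently counts; writing N asks it to scan and free N objects,
 * and writing 0 means "free everything it currently counts", e.g.:
 *
 *	echo 0 > /sys/kernel/debug/ion/heaps/system_shrink
 *
 * The heap name "system" above is only an example.
 */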
1408
1409 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1410 {
1411         struct dentry *debug_file;
1412
1413         if (!heap->ops->allocate || !heap->ops->free)
1414                 pr_err("%s: can not add heap with invalid ops struct.\n",
1415                        __func__);
1416
1417         spin_lock_init(&heap->free_lock);
1418         heap->free_list_size = 0;
1419
1420         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1421                 ion_heap_init_deferred_free(heap);
1422
1423         if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1424                 ion_heap_init_shrinker(heap);
1425
1426         heap->dev = dev;
1427         down_write(&dev->lock);
1428         /*
1429          * use negative heap->id to reverse the priority -- when traversing
1430          * the list later, higher id numbers are attempted first
1431          */
1432         plist_node_init(&heap->node, -heap->id);
1433         plist_add(&heap->node, &dev->heaps);
1434         debug_file = debugfs_create_file(heap->name, 0664,
1435                                          dev->heaps_debug_root, heap,
1436                                          &debug_heap_fops);
1437
1438         if (!debug_file) {
1439                 char buf[256], *path;
1440
1441                 path = dentry_path(dev->heaps_debug_root, buf, 256);
1442                 pr_err("Failed to create heap debugfs at %s/%s\n",
1443                        path, heap->name);
1444         }
1445
1446         if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
1447                 char debug_name[64];
1448
1449                 snprintf(debug_name, 64, "%s_shrink", heap->name);
1450                 debug_file = debugfs_create_file(
1451                         debug_name, 0644, dev->heaps_debug_root, heap,
1452                         &debug_shrink_fops);
1453                 if (!debug_file) {
1454                         char buf[256], *path;
1455
1456                         path = dentry_path(dev->heaps_debug_root, buf, 256);
1457                         pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1458                                path, debug_name);
1459                 }
1460         }
1461
1462         dev->heap_cnt++;
1463         up_write(&dev->lock);
1464 }
1465 EXPORT_SYMBOL(ion_device_add_heap);
1466
1467 struct ion_device *ion_device_create(long (*custom_ioctl)
1468                                      (struct ion_client *client,
1469                                       unsigned int cmd,
1470                                       unsigned long arg))
1471 {
1472         struct ion_device *idev;
1473         int ret;
1474
1475         idev = kzalloc(sizeof(*idev), GFP_KERNEL);
1476         if (!idev)
1477                 return ERR_PTR(-ENOMEM);
1478
1479         idev->dev.minor = MISC_DYNAMIC_MINOR;
1480         idev->dev.name = "ion";
1481         idev->dev.fops = &ion_fops;
1482         idev->dev.parent = NULL;
1483         ret = misc_register(&idev->dev);
1484         if (ret) {
1485                 pr_err("ion: failed to register misc device.\n");
1486                 kfree(idev);
1487                 return ERR_PTR(ret);
1488         }
1489
1490         idev->debug_root = debugfs_create_dir("ion", NULL);
1491         if (!idev->debug_root) {
1492                 pr_err("ion: failed to create debugfs root directory.\n");
1493                 goto debugfs_done;
1494         }
1495         idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1496         if (!idev->heaps_debug_root) {
1497                 pr_err("ion: failed to create debugfs heaps directory.\n");
1498                 goto debugfs_done;
1499         }
1500         idev->clients_debug_root = debugfs_create_dir("clients",
1501                                                 idev->debug_root);
1502         if (!idev->clients_debug_root)
1503                 pr_err("ion: failed to create debugfs clients directory.\n");
1504
1505 debugfs_done:
1506
1507         idev->custom_ioctl = custom_ioctl;
1508         idev->buffers = RB_ROOT;
1509         mutex_init(&idev->buffer_lock);
1510         init_rwsem(&idev->lock);
1511         plist_head_init(&idev->heaps);
1512         idev->clients = RB_ROOT;
1513         ion_root_client = &idev->clients;
1514         mutex_init(&debugfs_mutex);
1515         return idev;
1516 }
1517 EXPORT_SYMBOL(ion_device_create);
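/*
 * Editor's sketch: typical platform glue wires the pieces above together --
 * create the device, build heaps from board data, then register them.  The
 * helper ion_heap_create() and struct ion_platform_heap come from ion_priv.h
 * and ion.h respectively; "pdata" is a hypothetical platform description and
 * error handling is abbreviated:
 *
 *	struct ion_device *idev;
 *	struct ion_heap *heap;
 *	int i;
 *
 *	idev = ion_device_create(NULL);		// no custom ioctl handler
 *	if (IS_ERR(idev))
 *		return PTR_ERR(idev);
 *
 *	for (i = 0; i < pdata->nr; i++) {
 *		heap = ion_heap_create(&pdata->heaps[i]);
 *		if (IS_ERR(heap))
 *			continue;
 *		ion_device_add_heap(idev, heap);
 *	}
 */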
1518
1519 void ion_device_destroy(struct ion_device *dev)
1520 {
1521         misc_deregister(&dev->dev);
1522         debugfs_remove_recursive(dev->debug_root);
1523         /* XXX need to free the heaps and clients ? */
1524         kfree(dev);
1525 }
1526 EXPORT_SYMBOL(ion_device_destroy);