GNU Linux-libre 4.4.295-gnu1
[releases.git] / drivers / staging / android / ion / ion.c
1 /*
2  *
3  * drivers/staging/android/ion/ion.c
4  *
5  * Copyright (C) 2011 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 #include <linux/atomic.h>
19 #include <linux/device.h>
20 #include <linux/err.h>
21 #include <linux/file.h>
22 #include <linux/freezer.h>
23 #include <linux/fs.h>
24 #include <linux/anon_inodes.h>
25 #include <linux/kthread.h>
26 #include <linux/list.h>
27 #include <linux/memblock.h>
28 #include <linux/miscdevice.h>
29 #include <linux/export.h>
30 #include <linux/mm.h>
31 #include <linux/mm_types.h>
32 #include <linux/rbtree.h>
33 #include <linux/slab.h>
34 #include <linux/seq_file.h>
35 #include <linux/uaccess.h>
36 #include <linux/vmalloc.h>
37 #include <linux/debugfs.h>
38 #include <linux/dma-buf.h>
39 #include <linux/idr.h>
40
41 #include "ion.h"
42 #include "ion_priv.h"
43 #include "compat_ion.h"
44
45 /**
46  * struct ion_device - the metadata of the ion device node
47  * @dev:                the actual misc device
48  * @buffers:            an rb tree of all the existing buffers
49  * @buffer_lock:        lock protecting the tree of buffers
50  * @lock:               rwsem protecting the tree of heaps and clients
51  * @heaps:              list of all the heaps in the system
52  * @user_clients:       list of all the clients created from userspace
53  */
54 struct ion_device {
55         struct miscdevice dev;
56         struct rb_root buffers;
57         struct mutex buffer_lock;
58         struct rw_semaphore lock;
59         struct plist_head heaps;
60         long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
61                              unsigned long arg);
62         struct rb_root clients;
63         struct dentry *debug_root;
64         struct dentry *heaps_debug_root;
65         struct dentry *clients_debug_root;
66 };
67
68 /**
69  * struct ion_client - a process/hw block local address space
70  * @node:               node in the tree of all clients
71  * @dev:                backpointer to ion device
72  * @handles:            an rb tree of all the handles in this client
73  * @idr:                an idr space for allocating handle ids
74  * @lock:               lock protecting the tree of handles
75  * @name:               used for debugging
76  * @display_name:       used for debugging (unique version of @name)
77  * @display_serial:     used for debugging (to make display_name unique)
78  * @task:               used for debugging
79  *
80  * A client represents a list of buffers this client may access.
81  * The mutex stored here is used to protect both handles tree
82  * as well as the handles themselves, and should be held while modifying either.
83  */
84 struct ion_client {
85         struct rb_node node;
86         struct ion_device *dev;
87         struct rb_root handles;
88         struct idr idr;
89         struct mutex lock;
90         const char *name;
91         char *display_name;
92         int display_serial;
93         struct task_struct *task;
94         pid_t pid;
95         struct dentry *debug_root;
96 };
97
98 /**
99  * ion_handle - a client local reference to a buffer
100  * @ref:                reference count
101  * @client:             back pointer to the client the buffer resides in
102  * @buffer:             pointer to the buffer
103  * @node:               node in the client's handle rbtree
104  * @kmap_cnt:           count of times this client has mapped to kernel
105  * @id:                 client-unique id allocated by client->idr
106  *
107  * Modifications to node, map_cnt or mapping should be protected by the
108  * lock in the client.  Other fields are never changed after initialization.
109  */
110 struct ion_handle {
111         struct kref ref;
112         struct ion_client *client;
113         struct ion_buffer *buffer;
114         struct rb_node node;
115         unsigned int kmap_cnt;
116         int id;
117 };
118
119 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
120 {
121         return (buffer->flags & ION_FLAG_CACHED) &&
122                 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
123 }
124
125 bool ion_buffer_cached(struct ion_buffer *buffer)
126 {
127         return !!(buffer->flags & ION_FLAG_CACHED);
128 }
129
130 static inline struct page *ion_buffer_page(struct page *page)
131 {
132         return (struct page *)((unsigned long)page & ~(1UL));
133 }
134
135 static inline bool ion_buffer_page_is_dirty(struct page *page)
136 {
137         return !!((unsigned long)page & 1UL);
138 }
139
140 static inline void ion_buffer_page_dirty(struct page **page)
141 {
142         *page = (struct page *)((unsigned long)(*page) | 1UL);
143 }
144
145 static inline void ion_buffer_page_clean(struct page **page)
146 {
147         *page = (struct page *)((unsigned long)(*page) & ~(1UL));
148 }
149
150 /* this function should only be called while dev->lock is held */
151 static void ion_buffer_add(struct ion_device *dev,
152                            struct ion_buffer *buffer)
153 {
154         struct rb_node **p = &dev->buffers.rb_node;
155         struct rb_node *parent = NULL;
156         struct ion_buffer *entry;
157
158         while (*p) {
159                 parent = *p;
160                 entry = rb_entry(parent, struct ion_buffer, node);
161
162                 if (buffer < entry) {
163                         p = &(*p)->rb_left;
164                 } else if (buffer > entry) {
165                         p = &(*p)->rb_right;
166                 } else {
167                         pr_err("%s: buffer already found.", __func__);
168                         BUG();
169                 }
170         }
171
172         rb_link_node(&buffer->node, parent, p);
173         rb_insert_color(&buffer->node, &dev->buffers);
174 }
175
176 /* this function should only be called while dev->lock is held */
177 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
178                                      struct ion_device *dev,
179                                      unsigned long len,
180                                      unsigned long align,
181                                      unsigned long flags)
182 {
183         struct ion_buffer *buffer;
184         struct sg_table *table;
185         struct scatterlist *sg;
186         int i, ret;
187
188         buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
189         if (!buffer)
190                 return ERR_PTR(-ENOMEM);
191
192         buffer->heap = heap;
193         buffer->flags = flags;
194         kref_init(&buffer->ref);
195
196         ret = heap->ops->allocate(heap, buffer, len, align, flags);
197
198         if (ret) {
199                 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
200                         goto err2;
201
202                 ion_heap_freelist_drain(heap, 0);
203                 ret = heap->ops->allocate(heap, buffer, len, align,
204                                           flags);
205                 if (ret)
206                         goto err2;
207         }
208
209         buffer->dev = dev;
210         buffer->size = len;
211
212         table = heap->ops->map_dma(heap, buffer);
213         if (WARN_ONCE(table == NULL,
214                         "heap->ops->map_dma should return ERR_PTR on error"))
215                 table = ERR_PTR(-EINVAL);
216         if (IS_ERR(table)) {
217                 ret = -EINVAL;
218                 goto err1;
219         }
220
221         buffer->sg_table = table;
222         if (ion_buffer_fault_user_mappings(buffer)) {
223                 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
224                 struct scatterlist *sg;
225                 int i, j, k = 0;
226
227                 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
228                 if (!buffer->pages) {
229                         ret = -ENOMEM;
230                         goto err;
231                 }
232
233                 for_each_sg(table->sgl, sg, table->nents, i) {
234                         struct page *page = sg_page(sg);
235
236                         for (j = 0; j < sg->length / PAGE_SIZE; j++)
237                                 buffer->pages[k++] = page++;
238                 }
239         }
240
241         buffer->dev = dev;
242         buffer->size = len;
243         INIT_LIST_HEAD(&buffer->vmas);
244         mutex_init(&buffer->lock);
245         /*
246          * this will set up dma addresses for the sglist -- it is not
247          * technically correct as per the dma api -- a specific
248          * device isn't really taking ownership here.  However, in practice on
249          * our systems the only dma_address space is physical addresses.
250          * Additionally, we can't afford the overhead of invalidating every
251          * allocation via dma_map_sg. The implicit contract here is that
252          * memory coming from the heaps is ready for dma, ie if it has a
253          * cached mapping that mapping has been invalidated
254          */
255         for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
256                 sg_dma_address(sg) = sg_phys(sg);
257                 sg_dma_len(sg) = sg->length;
258         }
259         mutex_lock(&dev->buffer_lock);
260         ion_buffer_add(dev, buffer);
261         mutex_unlock(&dev->buffer_lock);
262         return buffer;
263
264 err:
265         heap->ops->unmap_dma(heap, buffer);
266 err1:
267         heap->ops->free(buffer);
268 err2:
269         kfree(buffer);
270         return ERR_PTR(ret);
271 }
272
273 void ion_buffer_destroy(struct ion_buffer *buffer)
274 {
275         if (WARN_ON(buffer->kmap_cnt > 0))
276                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
277         buffer->heap->ops->unmap_dma(buffer->heap, buffer);
278         buffer->heap->ops->free(buffer);
279         vfree(buffer->pages);
280         kfree(buffer);
281 }
282
283 static void _ion_buffer_destroy(struct kref *kref)
284 {
285         struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
286         struct ion_heap *heap = buffer->heap;
287         struct ion_device *dev = buffer->dev;
288
289         mutex_lock(&dev->buffer_lock);
290         rb_erase(&buffer->node, &dev->buffers);
291         mutex_unlock(&dev->buffer_lock);
292
293         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
294                 ion_heap_freelist_add(heap, buffer);
295         else
296                 ion_buffer_destroy(buffer);
297 }
298
299 static void ion_buffer_get(struct ion_buffer *buffer)
300 {
301         kref_get(&buffer->ref);
302 }
303
304 static int ion_buffer_put(struct ion_buffer *buffer)
305 {
306         return kref_put(&buffer->ref, _ion_buffer_destroy);
307 }
308
309 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
310 {
311         mutex_lock(&buffer->lock);
312         buffer->handle_count++;
313         mutex_unlock(&buffer->lock);
314 }
315
316 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
317 {
318         /*
319          * when a buffer is removed from a handle, if it is not in
320          * any other handles, copy the taskcomm and the pid of the
321          * process it's being removed from into the buffer.  At this
322          * point there will be no way to track what processes this buffer is
323          * being used by, it only exists as a dma_buf file descriptor.
324          * The taskcomm and pid can provide a debug hint as to where this fd
325          * is in the system
326          */
327         mutex_lock(&buffer->lock);
328         buffer->handle_count--;
329         BUG_ON(buffer->handle_count < 0);
330         if (!buffer->handle_count) {
331                 struct task_struct *task;
332
333                 task = current->group_leader;
334                 get_task_comm(buffer->task_comm, task);
335                 buffer->pid = task_pid_nr(task);
336         }
337         mutex_unlock(&buffer->lock);
338 }
339
340 static struct ion_handle *ion_handle_create(struct ion_client *client,
341                                      struct ion_buffer *buffer)
342 {
343         struct ion_handle *handle;
344
345         handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
346         if (!handle)
347                 return ERR_PTR(-ENOMEM);
348         kref_init(&handle->ref);
349         RB_CLEAR_NODE(&handle->node);
350         handle->client = client;
351         ion_buffer_get(buffer);
352         ion_buffer_add_to_handle(buffer);
353         handle->buffer = buffer;
354
355         return handle;
356 }
357
358 static void ion_handle_kmap_put(struct ion_handle *);
359
360 static void ion_handle_destroy(struct kref *kref)
361 {
362         struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
363         struct ion_client *client = handle->client;
364         struct ion_buffer *buffer = handle->buffer;
365
366         mutex_lock(&buffer->lock);
367         while (handle->kmap_cnt)
368                 ion_handle_kmap_put(handle);
369         mutex_unlock(&buffer->lock);
370
371         idr_remove(&client->idr, handle->id);
372         if (!RB_EMPTY_NODE(&handle->node))
373                 rb_erase(&handle->node, &client->handles);
374
375         ion_buffer_remove_from_handle(buffer);
376         ion_buffer_put(buffer);
377
378         kfree(handle);
379 }
380
381 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
382 {
383         return handle->buffer;
384 }
385
386 static void ion_handle_get(struct ion_handle *handle)
387 {
388         kref_get(&handle->ref);
389 }
390
391 /* Must hold the client lock */
392 static struct ion_handle *ion_handle_get_check_overflow(
393                                         struct ion_handle *handle)
394 {
395         if (atomic_read(&handle->ref.refcount) + 1 == 0)
396                 return ERR_PTR(-EOVERFLOW);
397         ion_handle_get(handle);
398         return handle;
399 }
400
401 static int ion_handle_put_nolock(struct ion_handle *handle)
402 {
403         int ret;
404
405         ret = kref_put(&handle->ref, ion_handle_destroy);
406
407         return ret;
408 }
409
410 int ion_handle_put(struct ion_handle *handle)
411 {
412         struct ion_client *client = handle->client;
413         int ret;
414
415         mutex_lock(&client->lock);
416         ret = ion_handle_put_nolock(handle);
417         mutex_unlock(&client->lock);
418
419         return ret;
420 }
421
422 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
423                                             struct ion_buffer *buffer)
424 {
425         struct rb_node *n = client->handles.rb_node;
426
427         while (n) {
428                 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
429
430                 if (buffer < entry->buffer)
431                         n = n->rb_left;
432                 else if (buffer > entry->buffer)
433                         n = n->rb_right;
434                 else
435                         return entry;
436         }
437         return ERR_PTR(-EINVAL);
438 }
439
440 static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
441                                                 int id)
442 {
443         struct ion_handle *handle;
444
445         handle = idr_find(&client->idr, id);
446         if (handle)
447                 return ion_handle_get_check_overflow(handle);
448
449         return ERR_PTR(-EINVAL);
450 }
451
452 static bool ion_handle_validate(struct ion_client *client,
453                                 struct ion_handle *handle)
454 {
455         WARN_ON(!mutex_is_locked(&client->lock));
456         return idr_find(&client->idr, handle->id) == handle;
457 }
458
459 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
460 {
461         int id;
462         struct rb_node **p = &client->handles.rb_node;
463         struct rb_node *parent = NULL;
464         struct ion_handle *entry;
465
466         id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
467         if (id < 0)
468                 return id;
469
470         handle->id = id;
471
472         while (*p) {
473                 parent = *p;
474                 entry = rb_entry(parent, struct ion_handle, node);
475
476                 if (handle->buffer < entry->buffer)
477                         p = &(*p)->rb_left;
478                 else if (handle->buffer > entry->buffer)
479                         p = &(*p)->rb_right;
480                 else
481                         WARN(1, "%s: buffer already found.", __func__);
482         }
483
484         rb_link_node(&handle->node, parent, p);
485         rb_insert_color(&handle->node, &client->handles);
486
487         return 0;
488 }
489
490 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
491                              size_t align, unsigned int heap_id_mask,
492                              unsigned int flags)
493 {
494         struct ion_handle *handle;
495         struct ion_device *dev = client->dev;
496         struct ion_buffer *buffer = NULL;
497         struct ion_heap *heap;
498         int ret;
499
500         pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
501                  len, align, heap_id_mask, flags);
502         /*
503          * traverse the list of heaps available in this system in priority
504          * order.  If the heap type is supported by the client, and matches the
505          * request of the caller allocate from it.  Repeat until allocate has
506          * succeeded or all heaps have been tried
507          */
508         len = PAGE_ALIGN(len);
509
510         if (!len)
511                 return ERR_PTR(-EINVAL);
512
513         down_read(&dev->lock);
514         plist_for_each_entry(heap, &dev->heaps, node) {
515                 /* if the caller didn't specify this heap id */
516                 if (!((1 << heap->id) & heap_id_mask))
517                         continue;
518                 buffer = ion_buffer_create(heap, dev, len, align, flags);
519                 if (!IS_ERR(buffer))
520                         break;
521         }
522         up_read(&dev->lock);
523
524         if (buffer == NULL)
525                 return ERR_PTR(-ENODEV);
526
527         if (IS_ERR(buffer))
528                 return ERR_CAST(buffer);
529
530         handle = ion_handle_create(client, buffer);
531
532         /*
533          * ion_buffer_create will create a buffer with a ref_cnt of 1,
534          * and ion_handle_create will take a second reference, drop one here
535          */
536         ion_buffer_put(buffer);
537
538         if (IS_ERR(handle))
539                 return handle;
540
541         mutex_lock(&client->lock);
542         ret = ion_handle_add(client, handle);
543         mutex_unlock(&client->lock);
544         if (ret) {
545                 ion_handle_put(handle);
546                 handle = ERR_PTR(ret);
547         }
548
549         return handle;
550 }
551 EXPORT_SYMBOL(ion_alloc);
552
553 static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
554 {
555         bool valid_handle;
556
557         BUG_ON(client != handle->client);
558
559         valid_handle = ion_handle_validate(client, handle);
560
561         if (!valid_handle) {
562                 WARN(1, "%s: invalid handle passed to free.\n", __func__);
563                 return;
564         }
565         ion_handle_put_nolock(handle);
566 }
567
568 void ion_free(struct ion_client *client, struct ion_handle *handle)
569 {
570         BUG_ON(client != handle->client);
571
572         mutex_lock(&client->lock);
573         ion_free_nolock(client, handle);
574         mutex_unlock(&client->lock);
575 }
576 EXPORT_SYMBOL(ion_free);
577
578 int ion_phys(struct ion_client *client, struct ion_handle *handle,
579              ion_phys_addr_t *addr, size_t *len)
580 {
581         struct ion_buffer *buffer;
582         int ret;
583
584         mutex_lock(&client->lock);
585         if (!ion_handle_validate(client, handle)) {
586                 mutex_unlock(&client->lock);
587                 return -EINVAL;
588         }
589
590         buffer = handle->buffer;
591
592         if (!buffer->heap->ops->phys) {
593                 pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
594                         __func__, buffer->heap->name, buffer->heap->type);
595                 mutex_unlock(&client->lock);
596                 return -ENODEV;
597         }
598         mutex_unlock(&client->lock);
599         ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
600         return ret;
601 }
602 EXPORT_SYMBOL(ion_phys);
603
604 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
605 {
606         void *vaddr;
607
608         if (buffer->kmap_cnt) {
609                 if (buffer->kmap_cnt == INT_MAX)
610                         return ERR_PTR(-EOVERFLOW);
611
612                 buffer->kmap_cnt++;
613                 return buffer->vaddr;
614         }
615         vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
616         if (WARN_ONCE(vaddr == NULL,
617                         "heap->ops->map_kernel should return ERR_PTR on error"))
618                 return ERR_PTR(-EINVAL);
619         if (IS_ERR(vaddr))
620                 return vaddr;
621         buffer->vaddr = vaddr;
622         buffer->kmap_cnt++;
623         return vaddr;
624 }
625
626 static void *ion_handle_kmap_get(struct ion_handle *handle)
627 {
628         struct ion_buffer *buffer = handle->buffer;
629         void *vaddr;
630
631         if (handle->kmap_cnt) {
632                 if (handle->kmap_cnt == INT_MAX)
633                         return ERR_PTR(-EOVERFLOW);
634
635                 handle->kmap_cnt++;
636                 return buffer->vaddr;
637         }
638         vaddr = ion_buffer_kmap_get(buffer);
639         if (IS_ERR(vaddr))
640                 return vaddr;
641         handle->kmap_cnt++;
642         return vaddr;
643 }
644
645 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
646 {
647         buffer->kmap_cnt--;
648         if (!buffer->kmap_cnt) {
649                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
650                 buffer->vaddr = NULL;
651         }
652 }
653
654 static void ion_handle_kmap_put(struct ion_handle *handle)
655 {
656         struct ion_buffer *buffer = handle->buffer;
657
658         if (!handle->kmap_cnt) {
659                 WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
660                 return;
661         }
662         handle->kmap_cnt--;
663         if (!handle->kmap_cnt)
664                 ion_buffer_kmap_put(buffer);
665 }
666
667 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
668 {
669         struct ion_buffer *buffer;
670         void *vaddr;
671
672         mutex_lock(&client->lock);
673         if (!ion_handle_validate(client, handle)) {
674                 pr_err("%s: invalid handle passed to map_kernel.\n",
675                        __func__);
676                 mutex_unlock(&client->lock);
677                 return ERR_PTR(-EINVAL);
678         }
679
680         buffer = handle->buffer;
681
682         if (!handle->buffer->heap->ops->map_kernel) {
683                 pr_err("%s: map_kernel is not implemented by this heap.\n",
684                        __func__);
685                 mutex_unlock(&client->lock);
686                 return ERR_PTR(-ENODEV);
687         }
688
689         mutex_lock(&buffer->lock);
690         vaddr = ion_handle_kmap_get(handle);
691         mutex_unlock(&buffer->lock);
692         mutex_unlock(&client->lock);
693         return vaddr;
694 }
695 EXPORT_SYMBOL(ion_map_kernel);
696
697 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
698 {
699         struct ion_buffer *buffer;
700
701         mutex_lock(&client->lock);
702         buffer = handle->buffer;
703         mutex_lock(&buffer->lock);
704         ion_handle_kmap_put(handle);
705         mutex_unlock(&buffer->lock);
706         mutex_unlock(&client->lock);
707 }
708 EXPORT_SYMBOL(ion_unmap_kernel);
709
710 static int ion_debug_client_show(struct seq_file *s, void *unused)
711 {
712         struct ion_client *client = s->private;
713         struct rb_node *n;
714         size_t sizes[ION_NUM_HEAP_IDS] = {0};
715         const char *names[ION_NUM_HEAP_IDS] = {NULL};
716         int i;
717
718         mutex_lock(&client->lock);
719         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
720                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
721                                                      node);
722                 unsigned int id = handle->buffer->heap->id;
723
724                 if (!names[id])
725                         names[id] = handle->buffer->heap->name;
726                 sizes[id] += handle->buffer->size;
727         }
728         mutex_unlock(&client->lock);
729
730         seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
731         for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
732                 if (!names[i])
733                         continue;
734                 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
735         }
736         return 0;
737 }
738
739 static int ion_debug_client_open(struct inode *inode, struct file *file)
740 {
741         return single_open(file, ion_debug_client_show, inode->i_private);
742 }
743
744 static const struct file_operations debug_client_fops = {
745         .open = ion_debug_client_open,
746         .read = seq_read,
747         .llseek = seq_lseek,
748         .release = single_release,
749 };
750
751 static int ion_get_client_serial(const struct rb_root *root,
752                                         const unsigned char *name)
753 {
754         int serial = -1;
755         struct rb_node *node;
756
757         for (node = rb_first(root); node; node = rb_next(node)) {
758                 struct ion_client *client = rb_entry(node, struct ion_client,
759                                                 node);
760
761                 if (strcmp(client->name, name))
762                         continue;
763                 serial = max(serial, client->display_serial);
764         }
765         return serial + 1;
766 }
767
768 struct ion_client *ion_client_create(struct ion_device *dev,
769                                      const char *name)
770 {
771         struct ion_client *client;
772         struct task_struct *task;
773         struct rb_node **p;
774         struct rb_node *parent = NULL;
775         struct ion_client *entry;
776         pid_t pid;
777
778         if (!name) {
779                 pr_err("%s: Name cannot be null\n", __func__);
780                 return ERR_PTR(-EINVAL);
781         }
782
783         get_task_struct(current->group_leader);
784         task_lock(current->group_leader);
785         pid = task_pid_nr(current->group_leader);
786         /*
787          * don't bother to store task struct for kernel threads,
788          * they can't be killed anyway
789          */
790         if (current->group_leader->flags & PF_KTHREAD) {
791                 put_task_struct(current->group_leader);
792                 task = NULL;
793         } else {
794                 task = current->group_leader;
795         }
796         task_unlock(current->group_leader);
797
798         client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
799         if (!client)
800                 goto err_put_task_struct;
801
802         client->dev = dev;
803         client->handles = RB_ROOT;
804         idr_init(&client->idr);
805         mutex_init(&client->lock);
806         client->task = task;
807         client->pid = pid;
808         client->name = kstrdup(name, GFP_KERNEL);
809         if (!client->name)
810                 goto err_free_client;
811
812         down_write(&dev->lock);
813         client->display_serial = ion_get_client_serial(&dev->clients, name);
814         client->display_name = kasprintf(
815                 GFP_KERNEL, "%s-%d", name, client->display_serial);
816         if (!client->display_name) {
817                 up_write(&dev->lock);
818                 goto err_free_client_name;
819         }
820         p = &dev->clients.rb_node;
821         while (*p) {
822                 parent = *p;
823                 entry = rb_entry(parent, struct ion_client, node);
824
825                 if (client < entry)
826                         p = &(*p)->rb_left;
827                 else if (client > entry)
828                         p = &(*p)->rb_right;
829         }
830         rb_link_node(&client->node, parent, p);
831         rb_insert_color(&client->node, &dev->clients);
832
833         client->debug_root = debugfs_create_file(client->display_name, 0664,
834                                                 dev->clients_debug_root,
835                                                 client, &debug_client_fops);
836         if (!client->debug_root) {
837                 char buf[256], *path;
838
839                 path = dentry_path(dev->clients_debug_root, buf, 256);
840                 pr_err("Failed to create client debugfs at %s/%s\n",
841                         path, client->display_name);
842         }
843
844         up_write(&dev->lock);
845
846         return client;
847
848 err_free_client_name:
849         kfree(client->name);
850 err_free_client:
851         kfree(client);
852 err_put_task_struct:
853         if (task)
854                 put_task_struct(current->group_leader);
855         return ERR_PTR(-ENOMEM);
856 }
857 EXPORT_SYMBOL(ion_client_create);
858
859 void ion_client_destroy(struct ion_client *client)
860 {
861         struct ion_device *dev = client->dev;
862         struct rb_node *n;
863
864         pr_debug("%s: %d\n", __func__, __LINE__);
865         while ((n = rb_first(&client->handles))) {
866                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
867                                                      node);
868                 ion_handle_destroy(&handle->ref);
869         }
870
871         idr_destroy(&client->idr);
872
873         down_write(&dev->lock);
874         if (client->task)
875                 put_task_struct(client->task);
876         rb_erase(&client->node, &dev->clients);
877         debugfs_remove_recursive(client->debug_root);
878         up_write(&dev->lock);
879
880         kfree(client->display_name);
881         kfree(client->name);
882         kfree(client);
883 }
884 EXPORT_SYMBOL(ion_client_destroy);
885
886 struct sg_table *ion_sg_table(struct ion_client *client,
887                               struct ion_handle *handle)
888 {
889         struct ion_buffer *buffer;
890         struct sg_table *table;
891
892         mutex_lock(&client->lock);
893         if (!ion_handle_validate(client, handle)) {
894                 pr_err("%s: invalid handle passed to map_dma.\n",
895                        __func__);
896                 mutex_unlock(&client->lock);
897                 return ERR_PTR(-EINVAL);
898         }
899         buffer = handle->buffer;
900         table = buffer->sg_table;
901         mutex_unlock(&client->lock);
902         return table;
903 }
904 EXPORT_SYMBOL(ion_sg_table);
905
906 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
907                                        struct device *dev,
908                                        enum dma_data_direction direction);
909
910 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
911                                         enum dma_data_direction direction)
912 {
913         struct dma_buf *dmabuf = attachment->dmabuf;
914         struct ion_buffer *buffer = dmabuf->priv;
915
916         ion_buffer_sync_for_device(buffer, attachment->dev, direction);
917         return buffer->sg_table;
918 }
919
920 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
921                               struct sg_table *table,
922                               enum dma_data_direction direction)
923 {
924 }
925
926 void ion_pages_sync_for_device(struct device *dev, struct page *page,
927                 size_t size, enum dma_data_direction dir)
928 {
929         struct scatterlist sg;
930
931         sg_init_table(&sg, 1);
932         sg_set_page(&sg, page, size, 0);
933         /*
934          * This is not correct - sg_dma_address needs a dma_addr_t that is valid
935          * for the targeted device, but this works on the currently targeted
936          * hardware.
937          */
938         sg_dma_address(&sg) = page_to_phys(page);
939         dma_sync_sg_for_device(dev, &sg, 1, dir);
940 }
941
942 struct ion_vma_list {
943         struct list_head list;
944         struct vm_area_struct *vma;
945 };
946
947 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
948                                        struct device *dev,
949                                        enum dma_data_direction dir)
950 {
951         struct ion_vma_list *vma_list;
952         int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
953         int i;
954
955         pr_debug("%s: syncing for device %s\n", __func__,
956                  dev ? dev_name(dev) : "null");
957
958         if (!ion_buffer_fault_user_mappings(buffer))
959                 return;
960
961         mutex_lock(&buffer->lock);
962         for (i = 0; i < pages; i++) {
963                 struct page *page = buffer->pages[i];
964
965                 if (ion_buffer_page_is_dirty(page))
966                         ion_pages_sync_for_device(dev, ion_buffer_page(page),
967                                                         PAGE_SIZE, dir);
968
969                 ion_buffer_page_clean(buffer->pages + i);
970         }
971         list_for_each_entry(vma_list, &buffer->vmas, list) {
972                 struct vm_area_struct *vma = vma_list->vma;
973
974                 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
975                                NULL);
976         }
977         mutex_unlock(&buffer->lock);
978 }
979
980 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
981 {
982         struct ion_buffer *buffer = vma->vm_private_data;
983         unsigned long pfn;
984         int ret;
985
986         mutex_lock(&buffer->lock);
987         ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
988         BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
989
990         pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
991         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
992         mutex_unlock(&buffer->lock);
993         if (ret)
994                 return VM_FAULT_ERROR;
995
996         return VM_FAULT_NOPAGE;
997 }
998
999 static void ion_vm_open(struct vm_area_struct *vma)
1000 {
1001         struct ion_buffer *buffer = vma->vm_private_data;
1002         struct ion_vma_list *vma_list;
1003
1004         vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
1005         if (!vma_list)
1006                 return;
1007         vma_list->vma = vma;
1008         mutex_lock(&buffer->lock);
1009         list_add(&vma_list->list, &buffer->vmas);
1010         mutex_unlock(&buffer->lock);
1011         pr_debug("%s: adding %p\n", __func__, vma);
1012 }
1013
1014 static void ion_vm_close(struct vm_area_struct *vma)
1015 {
1016         struct ion_buffer *buffer = vma->vm_private_data;
1017         struct ion_vma_list *vma_list, *tmp;
1018
1019         pr_debug("%s\n", __func__);
1020         mutex_lock(&buffer->lock);
1021         list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
1022                 if (vma_list->vma != vma)
1023                         continue;
1024                 list_del(&vma_list->list);
1025                 kfree(vma_list);
1026                 pr_debug("%s: deleting %p\n", __func__, vma);
1027                 break;
1028         }
1029         mutex_unlock(&buffer->lock);
1030 }
1031
1032 static const struct vm_operations_struct ion_vma_ops = {
1033         .open = ion_vm_open,
1034         .close = ion_vm_close,
1035         .fault = ion_vm_fault,
1036 };
1037
1038 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1039 {
1040         struct ion_buffer *buffer = dmabuf->priv;
1041         int ret = 0;
1042
1043         if (!buffer->heap->ops->map_user) {
1044                 pr_err("%s: this heap does not define a method for mapping to userspace\n",
1045                         __func__);
1046                 return -EINVAL;
1047         }
1048
1049         if (ion_buffer_fault_user_mappings(buffer)) {
1050                 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
1051                                                         VM_DONTDUMP;
1052                 vma->vm_private_data = buffer;
1053                 vma->vm_ops = &ion_vma_ops;
1054                 ion_vm_open(vma);
1055                 return 0;
1056         }
1057
1058         if (!(buffer->flags & ION_FLAG_CACHED))
1059                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1060
1061         mutex_lock(&buffer->lock);
1062         /* now map it to userspace */
1063         ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1064         mutex_unlock(&buffer->lock);
1065
1066         if (ret)
1067                 pr_err("%s: failure mapping buffer to userspace\n",
1068                        __func__);
1069
1070         return ret;
1071 }
1072
1073 static void ion_dma_buf_release(struct dma_buf *dmabuf)
1074 {
1075         struct ion_buffer *buffer = dmabuf->priv;
1076
1077         ion_buffer_put(buffer);
1078 }
1079
1080 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1081 {
1082         struct ion_buffer *buffer = dmabuf->priv;
1083
1084         return buffer->vaddr + offset * PAGE_SIZE;
1085 }
1086
1087 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1088                                void *ptr)
1089 {
1090 }
1091
1092 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1093                                         size_t len,
1094                                         enum dma_data_direction direction)
1095 {
1096         struct ion_buffer *buffer = dmabuf->priv;
1097         void *vaddr;
1098
1099         if (!buffer->heap->ops->map_kernel) {
1100                 pr_err("%s: map kernel is not implemented by this heap.\n",
1101                        __func__);
1102                 return -ENODEV;
1103         }
1104
1105         mutex_lock(&buffer->lock);
1106         vaddr = ion_buffer_kmap_get(buffer);
1107         mutex_unlock(&buffer->lock);
1108         return PTR_ERR_OR_ZERO(vaddr);
1109 }
1110
1111 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1112                                        size_t len,
1113                                        enum dma_data_direction direction)
1114 {
1115         struct ion_buffer *buffer = dmabuf->priv;
1116
1117         mutex_lock(&buffer->lock);
1118         ion_buffer_kmap_put(buffer);
1119         mutex_unlock(&buffer->lock);
1120 }
1121
1122 static struct dma_buf_ops dma_buf_ops = {
1123         .map_dma_buf = ion_map_dma_buf,
1124         .unmap_dma_buf = ion_unmap_dma_buf,
1125         .mmap = ion_mmap,
1126         .release = ion_dma_buf_release,
1127         .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1128         .end_cpu_access = ion_dma_buf_end_cpu_access,
1129         .kmap_atomic = ion_dma_buf_kmap,
1130         .kunmap_atomic = ion_dma_buf_kunmap,
1131         .kmap = ion_dma_buf_kmap,
1132         .kunmap = ion_dma_buf_kunmap,
1133 };
1134
1135 static struct dma_buf *__ion_share_dma_buf(struct ion_client *client,
1136                                            struct ion_handle *handle,
1137                                            bool lock_client)
1138 {
1139         DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
1140         struct ion_buffer *buffer;
1141         struct dma_buf *dmabuf;
1142         bool valid_handle;
1143
1144         if (lock_client)
1145                 mutex_lock(&client->lock);
1146         valid_handle = ion_handle_validate(client, handle);
1147         if (!valid_handle) {
1148                 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1149                 if (lock_client)
1150                         mutex_unlock(&client->lock);
1151                 return ERR_PTR(-EINVAL);
1152         }
1153         buffer = handle->buffer;
1154         ion_buffer_get(buffer);
1155         if (lock_client)
1156                 mutex_unlock(&client->lock);
1157
1158         exp_info.ops = &dma_buf_ops;
1159         exp_info.size = buffer->size;
1160         exp_info.flags = O_RDWR;
1161         exp_info.priv = buffer;
1162
1163         dmabuf = dma_buf_export(&exp_info);
1164         if (IS_ERR(dmabuf)) {
1165                 ion_buffer_put(buffer);
1166                 return dmabuf;
1167         }
1168
1169         return dmabuf;
1170 }
1171
1172 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1173                                   struct ion_handle *handle)
1174 {
1175         return __ion_share_dma_buf(client, handle, true);
1176 }
1177 EXPORT_SYMBOL(ion_share_dma_buf);
1178
1179 static int __ion_share_dma_buf_fd(struct ion_client *client,
1180                                   struct ion_handle *handle, bool lock_client)
1181 {
1182         struct dma_buf *dmabuf;
1183         int fd;
1184
1185         dmabuf = __ion_share_dma_buf(client, handle, lock_client);
1186         if (IS_ERR(dmabuf))
1187                 return PTR_ERR(dmabuf);
1188
1189         fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1190         if (fd < 0)
1191                 dma_buf_put(dmabuf);
1192
1193         return fd;
1194 }
1195
1196 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1197 {
1198         return __ion_share_dma_buf_fd(client, handle, true);
1199 }
1200 EXPORT_SYMBOL(ion_share_dma_buf_fd);
1201
1202 static int ion_share_dma_buf_fd_nolock(struct ion_client *client,
1203                                        struct ion_handle *handle)
1204 {
1205         return __ion_share_dma_buf_fd(client, handle, false);
1206 }
1207
1208 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1209 {
1210         struct dma_buf *dmabuf;
1211         struct ion_buffer *buffer;
1212         struct ion_handle *handle;
1213         int ret;
1214
1215         dmabuf = dma_buf_get(fd);
1216         if (IS_ERR(dmabuf))
1217                 return ERR_CAST(dmabuf);
1218         /* if this memory came from ion */
1219
1220         if (dmabuf->ops != &dma_buf_ops) {
1221                 pr_err("%s: can not import dmabuf from another exporter\n",
1222                        __func__);
1223                 dma_buf_put(dmabuf);
1224                 return ERR_PTR(-EINVAL);
1225         }
1226         buffer = dmabuf->priv;
1227
1228         mutex_lock(&client->lock);
1229         /* if a handle exists for this buffer just take a reference to it */
1230         handle = ion_handle_lookup(client, buffer);
1231         if (!IS_ERR(handle)) {
1232                 handle = ion_handle_get_check_overflow(handle);
1233                 mutex_unlock(&client->lock);
1234                 goto end;
1235         }
1236
1237         handle = ion_handle_create(client, buffer);
1238         if (IS_ERR(handle)) {
1239                 mutex_unlock(&client->lock);
1240                 goto end;
1241         }
1242
1243         ret = ion_handle_add(client, handle);
1244         mutex_unlock(&client->lock);
1245         if (ret) {
1246                 ion_handle_put(handle);
1247                 handle = ERR_PTR(ret);
1248         }
1249
1250 end:
1251         dma_buf_put(dmabuf);
1252         return handle;
1253 }
1254 EXPORT_SYMBOL(ion_import_dma_buf);
1255
1256 static int ion_sync_for_device(struct ion_client *client, int fd)
1257 {
1258         struct dma_buf *dmabuf;
1259         struct ion_buffer *buffer;
1260
1261         dmabuf = dma_buf_get(fd);
1262         if (IS_ERR(dmabuf))
1263                 return PTR_ERR(dmabuf);
1264
1265         /* if this memory came from ion */
1266         if (dmabuf->ops != &dma_buf_ops) {
1267                 pr_err("%s: can not sync dmabuf from another exporter\n",
1268                        __func__);
1269                 dma_buf_put(dmabuf);
1270                 return -EINVAL;
1271         }
1272         buffer = dmabuf->priv;
1273
1274         dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1275                                buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1276         dma_buf_put(dmabuf);
1277         return 0;
1278 }
1279
1280 /* fix up the cases where the ioctl direction bits are incorrect */
1281 static unsigned int ion_ioctl_dir(unsigned int cmd)
1282 {
1283         switch (cmd) {
1284         case ION_IOC_SYNC:
1285         case ION_IOC_FREE:
1286         case ION_IOC_CUSTOM:
1287                 return _IOC_WRITE;
1288         default:
1289                 return _IOC_DIR(cmd);
1290         }
1291 }
1292
1293 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1294 {
1295         struct ion_client *client = filp->private_data;
1296         struct ion_device *dev = client->dev;
1297         struct ion_handle *cleanup_handle = NULL;
1298         int ret = 0;
1299         unsigned int dir;
1300
1301         union {
1302                 struct ion_fd_data fd;
1303                 struct ion_allocation_data allocation;
1304                 struct ion_handle_data handle;
1305                 struct ion_custom_data custom;
1306         } data;
1307
1308         dir = ion_ioctl_dir(cmd);
1309
1310         if (_IOC_SIZE(cmd) > sizeof(data))
1311                 return -EINVAL;
1312
1313         if (dir & _IOC_WRITE)
1314                 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
1315                         return -EFAULT;
1316
1317         switch (cmd) {
1318         case ION_IOC_ALLOC:
1319         {
1320                 struct ion_handle *handle;
1321
1322                 handle = ion_alloc(client, data.allocation.len,
1323                                                 data.allocation.align,
1324                                                 data.allocation.heap_id_mask,
1325                                                 data.allocation.flags);
1326                 if (IS_ERR(handle))
1327                         return PTR_ERR(handle);
1328
1329                 data.allocation.handle = handle->id;
1330
1331                 cleanup_handle = handle;
1332                 break;
1333         }
1334         case ION_IOC_FREE:
1335         {
1336                 struct ion_handle *handle;
1337
1338                 mutex_lock(&client->lock);
1339                 handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
1340                 if (IS_ERR(handle)) {
1341                         mutex_unlock(&client->lock);
1342                         return PTR_ERR(handle);
1343                 }
1344                 ion_free_nolock(client, handle);
1345                 ion_handle_put_nolock(handle);
1346                 mutex_unlock(&client->lock);
1347                 break;
1348         }
1349         case ION_IOC_SHARE:
1350         case ION_IOC_MAP:
1351         {
1352                 struct ion_handle *handle;
1353
1354                 mutex_lock(&client->lock);
1355                 handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
1356                 if (IS_ERR(handle)) {
1357                         mutex_unlock(&client->lock);
1358                         return PTR_ERR(handle);
1359                 }
1360                 data.fd.fd = ion_share_dma_buf_fd_nolock(client, handle);
1361                 ion_handle_put_nolock(handle);
1362                 mutex_unlock(&client->lock);
1363                 if (data.fd.fd < 0)
1364                         ret = data.fd.fd;
1365                 break;
1366         }
1367         case ION_IOC_IMPORT:
1368         {
1369                 struct ion_handle *handle;
1370
1371                 handle = ion_import_dma_buf(client, data.fd.fd);
1372                 if (IS_ERR(handle))
1373                         ret = PTR_ERR(handle);
1374                 else
1375                         data.handle.handle = handle->id;
1376                 break;
1377         }
1378         case ION_IOC_SYNC:
1379         {
1380                 ret = ion_sync_for_device(client, data.fd.fd);
1381                 break;
1382         }
1383         case ION_IOC_CUSTOM:
1384         {
1385                 if (!dev->custom_ioctl)
1386                         return -ENOTTY;
1387                 ret = dev->custom_ioctl(client, data.custom.cmd,
1388                                                 data.custom.arg);
1389                 break;
1390         }
1391         default:
1392                 return -ENOTTY;
1393         }
1394
1395         if (dir & _IOC_READ) {
1396                 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1397                         if (cleanup_handle)
1398                                 ion_free(client, cleanup_handle);
1399                         return -EFAULT;
1400                 }
1401         }
1402         return ret;
1403 }
1404
1405 static int ion_release(struct inode *inode, struct file *file)
1406 {
1407         struct ion_client *client = file->private_data;
1408
1409         pr_debug("%s: %d\n", __func__, __LINE__);
1410         ion_client_destroy(client);
1411         return 0;
1412 }
1413
1414 static int ion_open(struct inode *inode, struct file *file)
1415 {
1416         struct miscdevice *miscdev = file->private_data;
1417         struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1418         struct ion_client *client;
1419         char debug_name[64];
1420
1421         pr_debug("%s: %d\n", __func__, __LINE__);
1422         snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1423         client = ion_client_create(dev, debug_name);
1424         if (IS_ERR(client))
1425                 return PTR_ERR(client);
1426         file->private_data = client;
1427
1428         return 0;
1429 }
1430
1431 static const struct file_operations ion_fops = {
1432         .owner          = THIS_MODULE,
1433         .open           = ion_open,
1434         .release        = ion_release,
1435         .unlocked_ioctl = ion_ioctl,
1436         .compat_ioctl   = compat_ion_ioctl,
1437 };
1438
1439 static size_t ion_debug_heap_total(struct ion_client *client,
1440                                    unsigned int id)
1441 {
1442         size_t size = 0;
1443         struct rb_node *n;
1444
1445         mutex_lock(&client->lock);
1446         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1447                 struct ion_handle *handle = rb_entry(n,
1448                                                      struct ion_handle,
1449                                                      node);
1450                 if (handle->buffer->heap->id == id)
1451                         size += handle->buffer->size;
1452         }
1453         mutex_unlock(&client->lock);
1454         return size;
1455 }
1456
1457 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1458 {
1459         struct ion_heap *heap = s->private;
1460         struct ion_device *dev = heap->dev;
1461         struct rb_node *n;
1462         size_t total_size = 0;
1463         size_t total_orphaned_size = 0;
1464
1465         seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
1466         seq_puts(s, "----------------------------------------------------\n");
1467
1468         for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1469                 struct ion_client *client = rb_entry(n, struct ion_client,
1470                                                      node);
1471                 size_t size = ion_debug_heap_total(client, heap->id);
1472
1473                 if (!size)
1474                         continue;
1475                 if (client->task) {
1476                         char task_comm[TASK_COMM_LEN];
1477
1478                         get_task_comm(task_comm, client->task);
1479                         seq_printf(s, "%16s %16u %16zu\n", task_comm,
1480                                    client->pid, size);
1481                 } else {
1482                         seq_printf(s, "%16s %16u %16zu\n", client->name,
1483                                    client->pid, size);
1484                 }
1485         }
1486         seq_puts(s, "----------------------------------------------------\n");
1487         seq_puts(s, "orphaned allocations (info is from last known client):\n");
1488         mutex_lock(&dev->buffer_lock);
1489         for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1490                 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1491                                                      node);
1492                 if (buffer->heap->id != heap->id)
1493                         continue;
1494                 total_size += buffer->size;
1495                 if (!buffer->handle_count) {
1496                         seq_printf(s, "%16s %16u %16zu %d %d\n",
1497                                    buffer->task_comm, buffer->pid,
1498                                    buffer->size, buffer->kmap_cnt,
1499                                    atomic_read(&buffer->ref.refcount));
1500                         total_orphaned_size += buffer->size;
1501                 }
1502         }
1503         mutex_unlock(&dev->buffer_lock);
1504         seq_puts(s, "----------------------------------------------------\n");
1505         seq_printf(s, "%16s %16zu\n", "total orphaned",
1506                    total_orphaned_size);
1507         seq_printf(s, "%16s %16zu\n", "total ", total_size);
1508         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1509                 seq_printf(s, "%16s %16zu\n", "deferred free",
1510                                 heap->free_list_size);
1511         seq_puts(s, "----------------------------------------------------\n");
1512
1513         if (heap->debug_show)
1514                 heap->debug_show(heap, s, unused);
1515
1516         return 0;
1517 }
1518
1519 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1520 {
1521         return single_open(file, ion_debug_heap_show, inode->i_private);
1522 }
1523
1524 static const struct file_operations debug_heap_fops = {
1525         .open = ion_debug_heap_open,
1526         .read = seq_read,
1527         .llseek = seq_lseek,
1528         .release = single_release,
1529 };
1530
1531 static int debug_shrink_set(void *data, u64 val)
1532 {
1533         struct ion_heap *heap = data;
1534         struct shrink_control sc;
1535         int objs;
1536
1537         sc.gfp_mask = -1;
1538         sc.nr_to_scan = val;
1539
1540         if (!val) {
1541                 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
1542                 sc.nr_to_scan = objs;
1543         }
1544
1545         heap->shrinker.scan_objects(&heap->shrinker, &sc);
1546         return 0;
1547 }
1548
1549 static int debug_shrink_get(void *data, u64 *val)
1550 {
1551         struct ion_heap *heap = data;
1552         struct shrink_control sc;
1553         int objs;
1554
1555         sc.gfp_mask = -1;
1556         sc.nr_to_scan = 0;
1557
1558         objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
1559         *val = objs;
1560         return 0;
1561 }
1562
1563 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1564                         debug_shrink_set, "%llu\n");
1565
1566 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1567 {
1568         struct dentry *debug_file;
1569
1570         if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1571             !heap->ops->unmap_dma)
1572                 pr_err("%s: can not add heap with invalid ops struct.\n",
1573                        __func__);
1574
1575         spin_lock_init(&heap->free_lock);
1576         heap->free_list_size = 0;
1577
1578         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1579                 ion_heap_init_deferred_free(heap);
1580
1581         if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1582                 ion_heap_init_shrinker(heap);
1583
1584         heap->dev = dev;
1585         down_write(&dev->lock);
1586         /*
1587          * use negative heap->id to reverse the priority -- when traversing
1588          * the list later attempt higher id numbers first
1589          */
1590         plist_node_init(&heap->node, -heap->id);
1591         plist_add(&heap->node, &dev->heaps);
1592         debug_file = debugfs_create_file(heap->name, 0664,
1593                                         dev->heaps_debug_root, heap,
1594                                         &debug_heap_fops);
1595
1596         if (!debug_file) {
1597                 char buf[256], *path;
1598
1599                 path = dentry_path(dev->heaps_debug_root, buf, 256);
1600                 pr_err("Failed to create heap debugfs at %s/%s\n",
1601                         path, heap->name);
1602         }
1603
1604         if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
1605                 char debug_name[64];
1606
1607                 snprintf(debug_name, 64, "%s_shrink", heap->name);
1608                 debug_file = debugfs_create_file(
1609                         debug_name, 0644, dev->heaps_debug_root, heap,
1610                         &debug_shrink_fops);
1611                 if (!debug_file) {
1612                         char buf[256], *path;
1613
1614                         path = dentry_path(dev->heaps_debug_root, buf, 256);
1615                         pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1616                                 path, debug_name);
1617                 }
1618         }
1619
1620         up_write(&dev->lock);
1621 }
1622 EXPORT_SYMBOL(ion_device_add_heap);
1623
1624 struct ion_device *ion_device_create(long (*custom_ioctl)
1625                                      (struct ion_client *client,
1626                                       unsigned int cmd,
1627                                       unsigned long arg))
1628 {
1629         struct ion_device *idev;
1630         int ret;
1631
1632         idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1633         if (!idev)
1634                 return ERR_PTR(-ENOMEM);
1635
1636         idev->dev.minor = MISC_DYNAMIC_MINOR;
1637         idev->dev.name = "ion";
1638         idev->dev.fops = &ion_fops;
1639         idev->dev.parent = NULL;
1640         ret = misc_register(&idev->dev);
1641         if (ret) {
1642                 pr_err("ion: failed to register misc device.\n");
1643                 kfree(idev);
1644                 return ERR_PTR(ret);
1645         }
1646
1647         idev->debug_root = debugfs_create_dir("ion", NULL);
1648         if (!idev->debug_root) {
1649                 pr_err("ion: failed to create debugfs root directory.\n");
1650                 goto debugfs_done;
1651         }
1652         idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1653         if (!idev->heaps_debug_root) {
1654                 pr_err("ion: failed to create debugfs heaps directory.\n");
1655                 goto debugfs_done;
1656         }
1657         idev->clients_debug_root = debugfs_create_dir("clients",
1658                                                 idev->debug_root);
1659         if (!idev->clients_debug_root)
1660                 pr_err("ion: failed to create debugfs clients directory.\n");
1661
1662 debugfs_done:
1663
1664         idev->custom_ioctl = custom_ioctl;
1665         idev->buffers = RB_ROOT;
1666         mutex_init(&idev->buffer_lock);
1667         init_rwsem(&idev->lock);
1668         plist_head_init(&idev->heaps);
1669         idev->clients = RB_ROOT;
1670         return idev;
1671 }
1672 EXPORT_SYMBOL(ion_device_create);
1673
1674 void ion_device_destroy(struct ion_device *dev)
1675 {
1676         misc_deregister(&dev->dev);
1677         debugfs_remove_recursive(dev->debug_root);
1678         /* XXX need to free the heaps and clients ? */
1679         kfree(dev);
1680 }
1681 EXPORT_SYMBOL(ion_device_destroy);
1682
1683 void __init ion_reserve(struct ion_platform_data *data)
1684 {
1685         int i;
1686
1687         for (i = 0; i < data->nr; i++) {
1688                 if (data->heaps[i].size == 0)
1689                         continue;
1690
1691                 if (data->heaps[i].base == 0) {
1692                         phys_addr_t paddr;
1693
1694                         paddr = memblock_alloc_base(data->heaps[i].size,
1695                                                     data->heaps[i].align,
1696                                                     MEMBLOCK_ALLOC_ANYWHERE);
1697                         if (!paddr) {
1698                                 pr_err("%s: error allocating memblock for heap %d\n",
1699                                         __func__, i);
1700                                 continue;
1701                         }
1702                         data->heaps[i].base = paddr;
1703                 } else {
1704                         int ret = memblock_reserve(data->heaps[i].base,
1705                                                data->heaps[i].size);
1706                         if (ret)
1707                                 pr_err("memblock reserve of %zx@%lx failed\n",
1708                                        data->heaps[i].size,
1709                                        data->heaps[i].base);
1710                 }
1711                 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
1712                         data->heaps[i].name,
1713                         data->heaps[i].base,
1714                         data->heaps[i].size);
1715         }
1716 }