/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"

#define NUM_ORDERS ARRAY_SIZE(orders)

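/*
 * The heap allocates in chunks of these page orders, largest first.
 * Assuming 4 KiB pages, orders 8, 4 and 0 correspond to 1 MiB, 64 KiB
 * and 4 KiB chunks.  High-order allocations must not enter direct
 * reclaim or warn on failure; low-order allocations may block.
 */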
static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
                                     __GFP_NORETRY) & ~__GFP_RECLAIM;
static gfp_t low_order_gfp_flags  = GFP_HIGHUSER | __GFP_ZERO;
static const unsigned int orders[] = {8, 4, 0};

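/*
 * Map an allocation order to its index in orders[] and in the heap's
 * pool arrays; a pool must exist for every order we allocate.
 */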
static int order_to_index(unsigned int order)
{
        int i;

        for (i = 0; i < NUM_ORDERS; i++)
                if (order == orders[i])
                        return i;
        BUG();
        return -1;
}

static inline unsigned int order_to_size(int order)
{
        return PAGE_SIZE << order;
}

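/*
 * The heap keeps one page pool per order, with separate pools for
 * cached and uncached buffers.
 */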
struct ion_system_heap {
        struct ion_heap heap;
        struct ion_page_pool *uncached_pools[NUM_ORDERS];
        struct ion_page_pool *cached_pools[NUM_ORDERS];
};

/*
 * Pages from the page pools are all zeroed before they are handed out.
 * Cached buffers need a cache clean after allocation; uncached buffers
 * have been non-cached ever since they were allocated, so they need no
 * further maintenance here.
 */
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
                                      struct ion_buffer *buffer,
                                      unsigned long order)
{
        bool cached = ion_buffer_cached(buffer);
        struct ion_page_pool *pool;
        struct page *page;

        if (!cached)
                pool = heap->uncached_pools[order_to_index(order)];
        else
                pool = heap->cached_pools[order_to_index(order)];

        page = ion_page_pool_alloc(pool);

        return page;
}

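/*
 * Give a page back to the pool it came from, except when the buffer is
 * being torn down on behalf of the shrinker: then the page goes
 * straight back to the system.
 */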
static void free_buffer_page(struct ion_system_heap *heap,
                             struct ion_buffer *buffer, struct page *page)
{
        struct ion_page_pool *pool;
        unsigned int order = compound_order(page);
        bool cached = ion_buffer_cached(buffer);

        /* go to system */
        if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) {
                __free_pages(page, order);
                return;
        }

        if (!cached)
                pool = heap->uncached_pools[order_to_index(order)];
        else
                pool = heap->cached_pools[order_to_index(order)];

        ion_page_pool_free(pool, page);
}

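/*
 * Try each order from largest to smallest, skipping any order that
 * would overshoot @size or exceed @max_order, and return the first
 * page we manage to allocate.
 */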
static struct page *alloc_largest_available(struct ion_system_heap *heap,
                                            struct ion_buffer *buffer,
                                            unsigned long size,
                                            unsigned int max_order)
{
        struct page *page;
        int i;

        for (i = 0; i < NUM_ORDERS; i++) {
                if (size < order_to_size(orders[i]))
                        continue;
                if (max_order < orders[i])
                        continue;

                page = alloc_buffer_page(heap, buffer, orders[i]);
                if (!page)
                        continue;

                return page;
        }

        return NULL;
}

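/*
 * Build the buffer from as few, as large as possible chunks and pack
 * them into a scatterlist.  max_order only ever decreases, so the
 * chunk list stays sorted from largest to smallest order.
 */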
static int ion_system_heap_allocate(struct ion_heap *heap,
                                    struct ion_buffer *buffer,
                                    unsigned long size,
                                    unsigned long flags)
{
        struct ion_system_heap *sys_heap = container_of(heap,
                                                        struct ion_system_heap,
                                                        heap);
        struct sg_table *table;
        struct scatterlist *sg;
        struct list_head pages;
        struct page *page, *tmp_page;
        int i = 0;
        unsigned long size_remaining = PAGE_ALIGN(size);
        unsigned int max_order = orders[0];

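        /* Refuse requests for more than half of system RAM */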
        if (size / PAGE_SIZE > totalram_pages / 2)
                return -ENOMEM;

        INIT_LIST_HEAD(&pages);
        while (size_remaining > 0) {
                page = alloc_largest_available(sys_heap, buffer, size_remaining,
                                               max_order);
                if (!page)
                        goto free_pages;
                list_add_tail(&page->lru, &pages);
                size_remaining -= PAGE_SIZE << compound_order(page);
                max_order = compound_order(page);
                i++;
        }
        table = kmalloc(sizeof(*table), GFP_KERNEL);
        if (!table)
                goto free_pages;

        if (sg_alloc_table(table, i, GFP_KERNEL))
                goto free_table;

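        /* Move the chunks from the temporary list into the scatterlist */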
        sg = table->sgl;
        list_for_each_entry_safe(page, tmp_page, &pages, lru) {
                sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
                sg = sg_next(sg);
                list_del(&page->lru);
        }

        buffer->sg_table = table;
        return 0;

free_table:
        kfree(table);
free_pages:
        list_for_each_entry_safe(page, tmp_page, &pages, lru)
                free_buffer_page(sys_heap, buffer, page);
        return -ENOMEM;
}

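/*
 * Return every chunk of the buffer to its page pool (or to the system)
 * and release the sg_table.
 */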
static void ion_system_heap_free(struct ion_buffer *buffer)
{
        struct ion_system_heap *sys_heap = container_of(buffer->heap,
                                                        struct ion_system_heap,
                                                        heap);
        struct sg_table *table = buffer->sg_table;
        struct scatterlist *sg;
        int i;

        /* Zero the buffer before its pages go back to the page pools */
        if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
                ion_heap_buffer_zero(buffer);

        for_each_sg(table->sgl, sg, table->nents, i)
                free_buffer_page(sys_heap, buffer, sg_page(sg));
        sg_free_table(table);
        kfree(table);
}

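/*
 * Shrinker callback.  A zero nr_to_scan only asks how many pages could
 * be freed; otherwise drain the pools, uncached before cached, until
 * nr_to_scan pages have been released.
 */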
static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
                                  int nr_to_scan)
{
        struct ion_page_pool *uncached_pool;
        struct ion_page_pool *cached_pool;
        struct ion_system_heap *sys_heap;
        int nr_total = 0;
        int i, nr_freed;
        int only_scan = 0;

        sys_heap = container_of(heap, struct ion_system_heap, heap);

        if (!nr_to_scan)
                only_scan = 1;

        for (i = 0; i < NUM_ORDERS; i++) {
                uncached_pool = sys_heap->uncached_pools[i];
                cached_pool = sys_heap->cached_pools[i];

                if (only_scan) {
                        nr_total += ion_page_pool_shrink(uncached_pool,
                                                         gfp_mask,
                                                         nr_to_scan);

                        nr_total += ion_page_pool_shrink(cached_pool,
                                                         gfp_mask,
                                                         nr_to_scan);
                } else {
                        nr_freed = ion_page_pool_shrink(uncached_pool,
                                                        gfp_mask,
                                                        nr_to_scan);
                        nr_to_scan -= nr_freed;
                        nr_total += nr_freed;
                        if (nr_to_scan <= 0)
                                break;
                        nr_freed = ion_page_pool_shrink(cached_pool,
                                                        gfp_mask,
                                                        nr_to_scan);
                        nr_to_scan -= nr_freed;
                        nr_total += nr_freed;
                        if (nr_to_scan <= 0)
                                break;
                }
        }
        return nr_total;
}

static struct ion_heap_ops system_heap_ops = {
        .allocate = ion_system_heap_allocate,
        .free = ion_system_heap_free,
        .map_kernel = ion_heap_map_kernel,
        .unmap_kernel = ion_heap_unmap_kernel,
        .map_user = ion_heap_map_user,
        .shrink = ion_system_heap_shrink,
};

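/* Report the pages currently held in each pool, uncached then cached */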
static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
                                      void *unused)
{
        struct ion_system_heap *sys_heap = container_of(heap,
                                                        struct ion_system_heap,
                                                        heap);
        int i;
        struct ion_page_pool *pool;

        for (i = 0; i < NUM_ORDERS; i++) {
                pool = sys_heap->uncached_pools[i];

                seq_printf(s, "%d order %u highmem pages uncached %lu total\n",
                           pool->high_count, pool->order,
                           (PAGE_SIZE << pool->order) * pool->high_count);
                seq_printf(s, "%d order %u lowmem pages uncached %lu total\n",
                           pool->low_count, pool->order,
                           (PAGE_SIZE << pool->order) * pool->low_count);
        }

        for (i = 0; i < NUM_ORDERS; i++) {
                pool = sys_heap->cached_pools[i];

                seq_printf(s, "%d order %u highmem pages cached %lu total\n",
                           pool->high_count, pool->order,
                           (PAGE_SIZE << pool->order) * pool->high_count);
                seq_printf(s, "%d order %u lowmem pages cached %lu total\n",
                           pool->low_count, pool->order,
                           (PAGE_SIZE << pool->order) * pool->low_count);
        }
        return 0;
}

static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
{
        int i;

        for (i = 0; i < NUM_ORDERS; i++)
                if (pools[i])
                        ion_page_pool_destroy(pools[i]);
}

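/*
 * Create one pool per order.  Orders above 4 use the GFP flags that
 * avoid reclaim and allocation-failure warnings.
 */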
static int ion_system_heap_create_pools(struct ion_page_pool **pools,
                                        bool cached)
{
        int i;

        for (i = 0; i < NUM_ORDERS; i++) {
                struct ion_page_pool *pool;
                gfp_t gfp_flags = low_order_gfp_flags;

                if (orders[i] > 4)
                        gfp_flags = high_order_gfp_flags;

                pool = ion_page_pool_create(gfp_flags, orders[i], cached);
                if (!pool)
                        goto err_create_pool;
                pools[i] = pool;
        }
        return 0;

err_create_pool:
        ion_system_heap_destroy_pools(pools);
        return -ENOMEM;
}

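/*
 * Set up the system heap with one set of uncached pools and one set of
 * cached pools.  ION_HEAP_FLAG_DEFER_FREE makes buffer freeing happen
 * asynchronously on the heap's deferred-free thread.
 */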
static struct ion_heap *__ion_system_heap_create(void)
{
        struct ion_system_heap *heap;

        heap = kzalloc(sizeof(*heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);
        heap->heap.ops = &system_heap_ops;
        heap->heap.type = ION_HEAP_TYPE_SYSTEM;
        heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;

        if (ion_system_heap_create_pools(heap->uncached_pools, false))
                goto free_heap;

        if (ion_system_heap_create_pools(heap->cached_pools, true))
                goto destroy_uncached_pools;

        heap->heap.debug_show = ion_system_heap_debug_show;
        return &heap->heap;

destroy_uncached_pools:
        ion_system_heap_destroy_pools(heap->uncached_pools);

free_heap:
        kfree(heap);
        return ERR_PTR(-ENOMEM);
}

static int ion_system_heap_create(void)
{
        struct ion_heap *heap;

        heap = __ion_system_heap_create();
        if (IS_ERR(heap))
                return PTR_ERR(heap);
        heap->name = "ion_system_heap";

        ion_device_add_heap(heap);
        return 0;
}
device_initcall(ion_system_heap_create);

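/*
 * The contig heap hands out a single physically contiguous run of
 * pages per buffer, described by exactly one scatterlist entry.
 */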
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
                                           struct ion_buffer *buffer,
                                           unsigned long len,
                                           unsigned long flags)
{
        int order = get_order(len);
        struct page *page;
        struct sg_table *table;
        unsigned long i;
        int ret;

        page = alloc_pages(low_order_gfp_flags | __GFP_NOWARN, order);
        if (!page)
                return -ENOMEM;

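        /*
         * Split the high-order allocation into order-0 pages and give
         * back the tail pages beyond the page-aligned length.
         */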
        split_page(page, order);

        len = PAGE_ALIGN(len);
        for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
                __free_page(page + i);

        table = kmalloc(sizeof(*table), GFP_KERNEL);
        if (!table) {
                ret = -ENOMEM;
                goto free_pages;
        }

        ret = sg_alloc_table(table, 1, GFP_KERNEL);
        if (ret)
                goto free_table;

        sg_set_page(table->sgl, page, len, 0);

        buffer->sg_table = table;

        return 0;

free_table:
        kfree(table);
free_pages:
        for (i = 0; i < len >> PAGE_SHIFT; i++)
                __free_page(page + i);

        return ret;
}

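/* Free the buffer's order-0 pages one by one and release the sg_table */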
static void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
        struct sg_table *table = buffer->sg_table;
        struct page *page = sg_page(table->sgl);
        unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
        unsigned long i;

        for (i = 0; i < pages; i++)
                __free_page(page + i);
        sg_free_table(table);
        kfree(table);
}

static struct ion_heap_ops kmalloc_ops = {
        .allocate = ion_system_contig_heap_allocate,
        .free = ion_system_contig_heap_free,
        .map_kernel = ion_heap_map_kernel,
        .unmap_kernel = ion_heap_unmap_kernel,
        .map_user = ion_heap_map_user,
};

static struct ion_heap *__ion_system_contig_heap_create(void)
{
        struct ion_heap *heap;

        heap = kzalloc(sizeof(*heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);
        heap->ops = &kmalloc_ops;
        heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
        heap->name = "ion_system_contig_heap";
        return heap;
}

static int ion_system_contig_heap_create(void)
{
        struct ion_heap *heap;

        heap = __ion_system_contig_heap_create();
        if (IS_ERR(heap))
                return PTR_ERR(heap);

        ion_device_add_heap(heap);
        return 0;
}
device_initcall(ion_system_contig_heap_create);