/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "ion.h"
28 #define NUM_ORDERS ARRAY_SIZE(orders)
30 static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
31 __GFP_NORETRY) & ~__GFP_RECLAIM;
32 static gfp_t low_order_gfp_flags = GFP_HIGHUSER | __GFP_ZERO;
33 static const unsigned int orders[] = {8, 4, 0};
35 static int order_to_index(unsigned int order)
39 for (i = 0; i < NUM_ORDERS; i++)
40 if (order == orders[i])
46 static inline unsigned int order_to_size(int order)
48 return PAGE_SIZE << order;
51 struct ion_system_heap {
53 struct ion_page_pool *uncached_pools[NUM_ORDERS];
54 struct ion_page_pool *cached_pools[NUM_ORDERS];
58 * The page from page-pool are all zeroed before. We need do cache
59 * clean for cached buffer. The uncached buffer are always non-cached
60 * since it's allocated. So no need for non-cached pages.
62 static struct page *alloc_buffer_page(struct ion_system_heap *heap,
63 struct ion_buffer *buffer,
66 bool cached = ion_buffer_cached(buffer);
67 struct ion_page_pool *pool;
71 pool = heap->uncached_pools[order_to_index(order)];
73 pool = heap->cached_pools[order_to_index(order)];
75 page = ion_page_pool_alloc(pool);
80 static void free_buffer_page(struct ion_system_heap *heap,
81 struct ion_buffer *buffer, struct page *page)
83 struct ion_page_pool *pool;
84 unsigned int order = compound_order(page);
85 bool cached = ion_buffer_cached(buffer);
88 if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) {
89 __free_pages(page, order);
94 pool = heap->uncached_pools[order_to_index(order)];
96 pool = heap->cached_pools[order_to_index(order)];
98 ion_page_pool_free(pool, page);
101 static struct page *alloc_largest_available(struct ion_system_heap *heap,
102 struct ion_buffer *buffer,
104 unsigned int max_order)
109 for (i = 0; i < NUM_ORDERS; i++) {
110 if (size < order_to_size(orders[i]))
112 if (max_order < orders[i])
115 page = alloc_buffer_page(heap, buffer, orders[i]);
125 static int ion_system_heap_allocate(struct ion_heap *heap,
126 struct ion_buffer *buffer,
130 struct ion_system_heap *sys_heap = container_of(heap,
131 struct ion_system_heap,
133 struct sg_table *table;
134 struct scatterlist *sg;
135 struct list_head pages;
136 struct page *page, *tmp_page;
138 unsigned long size_remaining = PAGE_ALIGN(size);
139 unsigned int max_order = orders[0];
141 if (size / PAGE_SIZE > totalram_pages / 2)
144 INIT_LIST_HEAD(&pages);
145 while (size_remaining > 0) {
146 page = alloc_largest_available(sys_heap, buffer, size_remaining,
150 list_add_tail(&page->lru, &pages);
151 size_remaining -= PAGE_SIZE << compound_order(page);
152 max_order = compound_order(page);
155 table = kmalloc(sizeof(*table), GFP_KERNEL);
159 if (sg_alloc_table(table, i, GFP_KERNEL))
163 list_for_each_entry_safe(page, tmp_page, &pages, lru) {
164 sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
166 list_del(&page->lru);
169 buffer->sg_table = table;
175 list_for_each_entry_safe(page, tmp_page, &pages, lru)
176 free_buffer_page(sys_heap, buffer, page);
180 static void ion_system_heap_free(struct ion_buffer *buffer)
182 struct ion_system_heap *sys_heap = container_of(buffer->heap,
183 struct ion_system_heap,
185 struct sg_table *table = buffer->sg_table;
186 struct scatterlist *sg;
189 /* zero the buffer before goto page pool */
190 if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
191 ion_heap_buffer_zero(buffer);
193 for_each_sg(table->sgl, sg, table->nents, i)
194 free_buffer_page(sys_heap, buffer, sg_page(sg));
195 sg_free_table(table);
199 static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
202 struct ion_page_pool *uncached_pool;
203 struct ion_page_pool *cached_pool;
204 struct ion_system_heap *sys_heap;
209 sys_heap = container_of(heap, struct ion_system_heap, heap);
214 for (i = 0; i < NUM_ORDERS; i++) {
215 uncached_pool = sys_heap->uncached_pools[i];
216 cached_pool = sys_heap->cached_pools[i];
219 nr_total += ion_page_pool_shrink(uncached_pool,
223 nr_total += ion_page_pool_shrink(cached_pool,
227 nr_freed = ion_page_pool_shrink(uncached_pool,
230 nr_to_scan -= nr_freed;
231 nr_total += nr_freed;
234 nr_freed = ion_page_pool_shrink(cached_pool,
237 nr_to_scan -= nr_freed;
238 nr_total += nr_freed;
246 static struct ion_heap_ops system_heap_ops = {
247 .allocate = ion_system_heap_allocate,
248 .free = ion_system_heap_free,
249 .map_kernel = ion_heap_map_kernel,
250 .unmap_kernel = ion_heap_unmap_kernel,
251 .map_user = ion_heap_map_user,
252 .shrink = ion_system_heap_shrink,
255 static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
258 struct ion_system_heap *sys_heap = container_of(heap,
259 struct ion_system_heap,
262 struct ion_page_pool *pool;
264 for (i = 0; i < NUM_ORDERS; i++) {
265 pool = sys_heap->uncached_pools[i];
267 seq_printf(s, "%d order %u highmem pages uncached %lu total\n",
268 pool->high_count, pool->order,
269 (PAGE_SIZE << pool->order) * pool->high_count);
270 seq_printf(s, "%d order %u lowmem pages uncached %lu total\n",
271 pool->low_count, pool->order,
272 (PAGE_SIZE << pool->order) * pool->low_count);
275 for (i = 0; i < NUM_ORDERS; i++) {
276 pool = sys_heap->cached_pools[i];
278 seq_printf(s, "%d order %u highmem pages cached %lu total\n",
279 pool->high_count, pool->order,
280 (PAGE_SIZE << pool->order) * pool->high_count);
281 seq_printf(s, "%d order %u lowmem pages cached %lu total\n",
282 pool->low_count, pool->order,
283 (PAGE_SIZE << pool->order) * pool->low_count);
288 static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
292 for (i = 0; i < NUM_ORDERS; i++)
294 ion_page_pool_destroy(pools[i]);
297 static int ion_system_heap_create_pools(struct ion_page_pool **pools,
302 for (i = 0; i < NUM_ORDERS; i++) {
303 struct ion_page_pool *pool;
304 gfp_t gfp_flags = low_order_gfp_flags;
307 gfp_flags = high_order_gfp_flags;
309 pool = ion_page_pool_create(gfp_flags, orders[i], cached);
311 goto err_create_pool;
317 ion_system_heap_destroy_pools(pools);
321 static struct ion_heap *__ion_system_heap_create(void)
323 struct ion_system_heap *heap;
325 heap = kzalloc(sizeof(*heap), GFP_KERNEL);
327 return ERR_PTR(-ENOMEM);
328 heap->heap.ops = &system_heap_ops;
329 heap->heap.type = ION_HEAP_TYPE_SYSTEM;
330 heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
332 if (ion_system_heap_create_pools(heap->uncached_pools, false))
335 if (ion_system_heap_create_pools(heap->cached_pools, true))
336 goto destroy_uncached_pools;
338 heap->heap.debug_show = ion_system_heap_debug_show;
341 destroy_uncached_pools:
342 ion_system_heap_destroy_pools(heap->uncached_pools);
346 return ERR_PTR(-ENOMEM);
349 static int ion_system_heap_create(void)
351 struct ion_heap *heap;
353 heap = __ion_system_heap_create();
355 return PTR_ERR(heap);
356 heap->name = "ion_system_heap";
358 ion_device_add_heap(heap);
361 device_initcall(ion_system_heap_create);
363 static int ion_system_contig_heap_allocate(struct ion_heap *heap,
364 struct ion_buffer *buffer,
368 int order = get_order(len);
370 struct sg_table *table;
374 page = alloc_pages(low_order_gfp_flags | __GFP_NOWARN, order);
378 split_page(page, order);
380 len = PAGE_ALIGN(len);
381 for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
382 __free_page(page + i);
384 table = kmalloc(sizeof(*table), GFP_KERNEL);
390 ret = sg_alloc_table(table, 1, GFP_KERNEL);
394 sg_set_page(table->sgl, page, len, 0);
396 buffer->sg_table = table;
403 for (i = 0; i < len >> PAGE_SHIFT; i++)
404 __free_page(page + i);
409 static void ion_system_contig_heap_free(struct ion_buffer *buffer)
411 struct sg_table *table = buffer->sg_table;
412 struct page *page = sg_page(table->sgl);
413 unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
416 for (i = 0; i < pages; i++)
417 __free_page(page + i);
418 sg_free_table(table);
422 static struct ion_heap_ops kmalloc_ops = {
423 .allocate = ion_system_contig_heap_allocate,
424 .free = ion_system_contig_heap_free,
425 .map_kernel = ion_heap_map_kernel,
426 .unmap_kernel = ion_heap_unmap_kernel,
427 .map_user = ion_heap_map_user,
430 static struct ion_heap *__ion_system_contig_heap_create(void)
432 struct ion_heap *heap;
434 heap = kzalloc(sizeof(*heap), GFP_KERNEL);
436 return ERR_PTR(-ENOMEM);
437 heap->ops = &kmalloc_ops;
438 heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
439 heap->name = "ion_system_contig_heap";
/* Initcall: create the contiguous heap and register it with the ION core. */
static int ion_system_contig_heap_create(void)
{
	struct ion_heap *heap;

	heap = __ion_system_contig_heap_create();
	if (IS_ERR(heap))
		return PTR_ERR(heap);

	ion_device_add_heap(heap);
	return 0;
}
device_initcall(ion_system_contig_heap_create);