/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

#define NUM_ORDERS ARRAY_SIZE(orders)

static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
				     __GFP_NORETRY) & ~__GFP_RECLAIM;
static gfp_t low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO);
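/*
 * High-order allocations are expensive, so they are attempted with
 * __GFP_NORETRY and with reclaim masked off: if a large chunk is not
 * immediately available, the heap falls back to a smaller order rather
 * than stalling in reclaim. Assuming a 4 KiB PAGE_SIZE, the orders
 * below give 1 MiB, 64 KiB and 4 KiB pool chunks.
 */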
static const unsigned int orders[] = {8, 4, 0};

static int order_to_index(unsigned int order)
{
	int i;

	for (i = 0; i < NUM_ORDERS; i++)
		if (order == orders[i])
			return i;
	BUG();
	return -1;
}

static inline unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

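/*
 * One pool per order, kept separately for cached and uncached
 * allocations: uncached pages are cache-cleaned once, when they first
 * enter a pool, so recycling must not mix them with pages destined
 * for cached mappings.
 */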
struct ion_system_heap {
	struct ion_heap heap;
	struct ion_page_pool *uncached_pools[NUM_ORDERS];
	struct ion_page_pool *cached_pools[NUM_ORDERS];
};

/*
 * Pages from the page pools are always zeroed beforehand. Cached
 * buffers still need a cache clean after allocation. Uncached buffers
 * have been non-cached ever since they were allocated, so no clean is
 * needed for uncached pages.
 */
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order)
{
	bool cached = ion_buffer_cached(buffer);
	struct ion_page_pool *pool;
	struct page *page;

	if (!cached)
		pool = heap->uncached_pools[order_to_index(order)];
	else
		pool = heap->cached_pools[order_to_index(order)];

	page = ion_page_pool_alloc(pool);

	if (cached)
		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
					  DMA_BIDIRECTIONAL);
	return page;
}

static void free_buffer_page(struct ion_system_heap *heap,
			     struct ion_buffer *buffer, struct page *page)
{
	struct ion_page_pool *pool;
	unsigned int order = compound_order(page);
	bool cached = ion_buffer_cached(buffer);

	/* pages freed by the shrinker go straight back to the system */
	if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) {
		__free_pages(page, order);
		return;
	}

	if (!cached)
		pool = heap->uncached_pools[order_to_index(order)];
	else
		pool = heap->cached_pools[order_to_index(order)];

	ion_page_pool_free(pool, page);
}

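/*
 * Pick the largest pool order that still fits. Worked example,
 * assuming 4 KiB pages: a 1.25 MiB request is filled as one order-8
 * chunk (1 MiB) plus four order-4 chunks (64 KiB each); max_order
 * ratchets down, so an order that has already failed is never retried.
 */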
static struct page *alloc_largest_available(struct ion_system_heap *heap,
					    struct ion_buffer *buffer,
					    unsigned long size,
					    unsigned int max_order)
{
	struct page *page;
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_buffer_page(heap, buffer, orders[i]);
		if (!page)
			continue;

		return page;
	}

	return NULL;
}

static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table;
	struct scatterlist *sg;
	struct list_head pages;
	struct page *page, *tmp_page;
	int i = 0;
	unsigned long size_remaining = PAGE_ALIGN(size);
	unsigned int max_order = orders[0];

	if (align > PAGE_SIZE)
		return -EINVAL;

	if (size / PAGE_SIZE > totalram_pages / 2)
		return -ENOMEM;

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		page = alloc_largest_available(sys_heap, buffer, size_remaining,
					       max_order);
		if (!page)
			goto free_pages;
		list_add_tail(&page->lru, &pages);
		size_remaining -= PAGE_SIZE << compound_order(page);
		max_order = compound_order(page);
		i++;
	}
	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto free_pages;

	if (sg_alloc_table(table, i, GFP_KERNEL))
		goto free_table;

	sg = table->sgl;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
		sg = sg_next(sg);
		list_del(&page->lru);
	}

	buffer->sg_table = table;
	return 0;

free_table:
	kfree(table);
free_pages:
	list_for_each_entry_safe(page, tmp_page, &pages, lru)
		free_buffer_page(sys_heap, buffer, page);
	return -ENOMEM;
}

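/*
 * For illustration only: a minimal userspace sketch of how an
 * allocation reaches this heap, assuming the legacy handle-based
 * /dev/ion ABI (struct ion_allocation_data, ION_IOC_ALLOC) of this
 * era:
 *
 *	struct ion_allocation_data data = {
 *		.len = 1024 * 1024,
 *		.align = 0,
 *		.heap_id_mask = ION_HEAP_SYSTEM_MASK,
 *		.flags = ION_FLAG_CACHED,
 *	};
 *	int fd = open("/dev/ion", O_RDWR);
 *	ioctl(fd, ION_IOC_ALLOC, &data);
 *
 * The ioctl lands in ion_alloc(), which picks a heap from
 * heap_id_mask and ends up calling system_heap_ops.allocate().
 */
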
static void ion_system_heap_free(struct ion_buffer *buffer)
{
	struct ion_system_heap *sys_heap = container_of(buffer->heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table = buffer->sg_table;
	struct scatterlist *sg;
	int i;

	/* zero the buffer before returning it to the page pool */
	if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
		ion_heap_buffer_zero(buffer);

	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg));
	sg_free_table(table);
	kfree(table);
}

static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
				  int nr_to_scan)
{
	struct ion_page_pool *uncached_pool;
	struct ion_page_pool *cached_pool;
	struct ion_system_heap *sys_heap;
	int nr_total = 0;
	int i, nr_freed;
	int only_scan = 0;

	sys_heap = container_of(heap, struct ion_system_heap, heap);

	/* nr_to_scan == 0 means "only report how much could be freed" */
	if (!nr_to_scan)
		only_scan = 1;

	for (i = 0; i < NUM_ORDERS; i++) {
		uncached_pool = sys_heap->uncached_pools[i];
		cached_pool = sys_heap->cached_pools[i];

		if (only_scan) {
			nr_total += ion_page_pool_shrink(uncached_pool,
							 gfp_mask,
							 nr_to_scan);

			nr_total += ion_page_pool_shrink(cached_pool,
							 gfp_mask,
							 nr_to_scan);
		} else {
			nr_freed = ion_page_pool_shrink(uncached_pool,
							gfp_mask,
							nr_to_scan);
			nr_to_scan -= nr_freed;
			nr_total += nr_freed;
			if (nr_to_scan <= 0)
				break;

			nr_freed = ion_page_pool_shrink(cached_pool,
							gfp_mask,
							nr_to_scan);
			nr_to_scan -= nr_freed;
			nr_total += nr_freed;
			if (nr_to_scan <= 0)
				break;
		}
	}
	return nr_total;
}

static struct ion_heap_ops system_heap_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
	.shrink = ion_system_heap_shrink,
};
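
/*
 * Because system_heap_ops provides .shrink (and the heap is created
 * with ION_HEAP_FLAG_DEFER_FREE below), the ION core registers a
 * shrinker via ion_heap_init_shrinker(), so the page pools are
 * drained automatically under memory pressure.
 */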
static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
				      void *unused)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct ion_page_pool *pool;
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		pool = sys_heap->uncached_pools[i];

		seq_printf(s, "%d order %u highmem pages uncached %lu total\n",
			   pool->high_count, pool->order,
			   (PAGE_SIZE << pool->order) * pool->high_count);
		seq_printf(s, "%d order %u lowmem pages uncached %lu total\n",
			   pool->low_count, pool->order,
			   (PAGE_SIZE << pool->order) * pool->low_count);
	}

	for (i = 0; i < NUM_ORDERS; i++) {
		pool = sys_heap->cached_pools[i];

		seq_printf(s, "%d order %u highmem pages cached %lu total\n",
			   pool->high_count, pool->order,
			   (PAGE_SIZE << pool->order) * pool->high_count);
		seq_printf(s, "%d order %u lowmem pages cached %lu total\n",
			   pool->low_count, pool->order,
			   (PAGE_SIZE << pool->order) * pool->low_count);
	}
	return 0;
}

static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
{
	int i;

	for (i = 0; i < NUM_ORDERS; i++)
		if (pools[i])
			ion_page_pool_destroy(pools[i]);
}

static int ion_system_heap_create_pools(struct ion_page_pool **pools,
					bool cached)
{
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		struct ion_page_pool *pool;
		gfp_t gfp_flags = low_order_gfp_flags;

		if (orders[i] > 4)
			gfp_flags = high_order_gfp_flags;

		pool = ion_page_pool_create(gfp_flags, orders[i], cached);
		if (!pool)
			goto err_create_pool;
		pools[i] = pool;
	}
	return 0;

err_create_pool:
	ion_system_heap_destroy_pools(pools);
	return -ENOMEM;
}

struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
	struct ion_system_heap *heap;

	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->heap.ops = &system_heap_ops;
	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
	heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;

	if (ion_system_heap_create_pools(heap->uncached_pools, false))
		goto free_heap;

	if (ion_system_heap_create_pools(heap->cached_pools, true))
		goto destroy_uncached_pools;

	heap->heap.debug_show = ion_system_heap_debug_show;
	return &heap->heap;

destroy_uncached_pools:
	ion_system_heap_destroy_pools(heap->uncached_pools);
free_heap:
	kfree(heap);
	return ERR_PTR(-ENOMEM);
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		ion_page_pool_destroy(sys_heap->uncached_pools[i]);
		ion_page_pool_destroy(sys_heap->cached_pools[i]);
	}
	kfree(sys_heap);
}

static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	int order = get_order(len);
	struct page *page;
	struct sg_table *table;
	unsigned long i;
	int ret;

	if (align > (PAGE_SIZE << order))
		return -EINVAL;

	page = alloc_pages(low_order_gfp_flags | __GFP_NOWARN, order);
	if (!page)
		return -ENOMEM;

	split_page(page, order);

	/* trim pages beyond the requested length back to the system */
	len = PAGE_ALIGN(len);
	for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
		__free_page(page + i);
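
	/*
	 * Worked example of the trim above, assuming 4 KiB pages: a
	 * 20 KiB request gives order 3 (eight pages). split_page()
	 * has turned the compound page into eight order-0 pages, so
	 * the loop hands pages 5..7 back, keeping exactly the five
	 * pages needed.
	 */
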
	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto free_pages;
	}

	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret)
		goto free_table;

	sg_set_page(table->sgl, page, len, 0);

	buffer->sg_table = table;

	ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);

	return 0;

free_table:
	kfree(table);
free_pages:
	for (i = 0; i < len >> PAGE_SHIFT; i++)
		__free_page(page + i);

	return ret;
}

static void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->sg_table;
	struct page *page = sg_page(table->sgl);
	unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
	unsigned long i;

	for (i = 0; i < pages; i++)
		__free_page(page + i);
	sg_free_table(table);
	kfree(table);
}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}