/*
 * drivers/staging/android/ion/ion_carveout_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

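/*
 * Sentinel returned by ion_carveout_allocate() when gen_pool_alloc()
 * fails; ion_carveout_free() checks for it so a failed allocation is
 * never handed back to the pool.
 */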
#define ION_CARVEOUT_ALLOCATE_FAIL      -1

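/**
 * struct ion_carveout_heap - heap backed by a reserved physical region
 * @heap: generic ion heap embedded in this carveout heap
 * @pool: gen_pool allocator managing the carved-out address range
 * @base: physical base address of the reserved region
 */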
struct ion_carveout_heap {
        struct ion_heap heap;
        struct gen_pool *pool;
        ion_phys_addr_t base;
};

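/*
 * Carve @size bytes out of the heap's gen_pool. The @align argument is
 * unused here: the caller rejects alignment above PAGE_SIZE, and the
 * pool's PAGE_SHIFT minimum order keeps every allocation page-aligned.
 */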
static ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
                                             unsigned long size,
                                             unsigned long align)
{
        struct ion_carveout_heap *carveout_heap =
                container_of(heap, struct ion_carveout_heap, heap);
        unsigned long offset = gen_pool_alloc(carveout_heap->pool, size);

        if (!offset)
                return ION_CARVEOUT_ALLOCATE_FAIL;

        return offset;
}

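/* Return a range to the pool; the failed-allocation sentinel is a no-op. */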
static void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
                              unsigned long size)
{
        struct ion_carveout_heap *carveout_heap =
                container_of(heap, struct ion_carveout_heap, heap);

        if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
                return;
        gen_pool_free(carveout_heap->pool, addr, size);
}

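/*
 * Allocate a physically contiguous buffer from the carveout region and
 * describe it with a single-entry scatterlist.
 */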
static int ion_carveout_heap_allocate(struct ion_heap *heap,
                                      struct ion_buffer *buffer,
                                      unsigned long size, unsigned long align,
                                      unsigned long flags)
{
        struct sg_table *table;
        ion_phys_addr_t paddr;
        int ret;

        if (align > PAGE_SIZE)
                return -EINVAL;

        table = kmalloc(sizeof(*table), GFP_KERNEL);
        if (!table)
                return -ENOMEM;
        ret = sg_alloc_table(table, 1, GFP_KERNEL);
        if (ret)
                goto err_free;

        paddr = ion_carveout_allocate(heap, size, align);
        if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
                ret = -ENOMEM;
                goto err_free_table;
        }

        sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
        buffer->sg_table = table;

        return 0;

err_free_table:
        sg_free_table(table);
err_free:
        kfree(table);
        return ret;
}

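/*
 * Zero the buffer before returning its range to the pool so stale data
 * never leaks to the next client; for cached buffers the sync pushes
 * the zeroes out of the CPU cache to memory.
 */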
static void ion_carveout_heap_free(struct ion_buffer *buffer)
{
        struct ion_heap *heap = buffer->heap;
        struct sg_table *table = buffer->sg_table;
        struct page *page = sg_page(table->sgl);
        ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));

        ion_heap_buffer_zero(buffer);

        if (ion_buffer_cached(buffer))
                dma_sync_sg_for_device(NULL, table->sgl, table->nents,
                                       DMA_BIDIRECTIONAL);

        ion_carveout_free(heap, paddr, buffer->size);
        sg_free_table(table);
        kfree(table);
}

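/*
 * Only allocate/free are carveout-specific; user and kernel mappings
 * are serviced by the generic ion_heap helpers, since carveout memory
 * is ordinary page-backed physical memory.
 */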
static struct ion_heap_ops carveout_heap_ops = {
        .allocate = ion_carveout_heap_allocate,
        .free = ion_carveout_heap_free,
        .map_user = ion_heap_map_user,
        .map_kernel = ion_heap_map_kernel,
        .unmap_kernel = ion_heap_unmap_kernel,
};

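/*
 * Create a carveout heap over [base, base + size). The region is synced
 * for the device and zeroed up front, then handed to a gen_pool with
 * page granularity. ION_HEAP_FLAG_DEFER_FREE moves buffer teardown to
 * the ion deferred-free thread.
 */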
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
{
        struct ion_carveout_heap *carveout_heap;
        int ret;
        struct page *page;
        size_t size;

        page = pfn_to_page(PFN_DOWN(heap_data->base));
        size = heap_data->size;

        ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);

        ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
        if (ret)
                return ERR_PTR(ret);

        carveout_heap = kzalloc(sizeof(*carveout_heap), GFP_KERNEL);
        if (!carveout_heap)
                return ERR_PTR(-ENOMEM);

        carveout_heap->pool = gen_pool_create(PAGE_SHIFT, -1);
        if (!carveout_heap->pool) {
                kfree(carveout_heap);
                return ERR_PTR(-ENOMEM);
        }
        carveout_heap->base = heap_data->base;
        gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size,
                     -1);
        carveout_heap->heap.ops = &carveout_heap_ops;
        carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
        carveout_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;

        return &carveout_heap->heap;
}

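/* Tear down the pool and release the heap; pairs with _create() above. */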
void ion_carveout_heap_destroy(struct ion_heap *heap)
{
        struct ion_carveout_heap *carveout_heap =
                container_of(heap, struct ion_carveout_heap, heap);

        gen_pool_destroy(carveout_heap->pool);
        kfree(carveout_heap);
}