// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF CMA heap exporter
 *
 * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * Also utilizing parts of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <afd@ti.com>
 */
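/*
 * Userspace allocates from this heap through the dma-heap chardev. A
 * minimal sketch (assuming the default CMA area is exported under the
 * name "reserved"; the node is named after the CMA area, so it can
 * differ per platform):
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/dma-heap.h>
 *
 *	struct dma_heap_allocation_data data = {
 *		.len = 4096,
 *		.fd_flags = O_RDWR | O_CLOEXEC,
 *	};
 *	int heap = open("/dev/dma_heap/reserved", O_RDWR);
 *	ioctl(heap, DMA_HEAP_IOCTL_ALLOC, &data);
 *	// on success, data.fd is the exported dma-buf fd
 */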
#include <linux/cma.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
struct cma_heap {
	struct dma_heap *heap;
	struct cma *cma;
};
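/*
 * Per-buffer state: the backing CMA allocation, a page array used for
 * sg-table construction, vmap and mmap, plus attachment bookkeeping.
 */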
struct cma_heap_buffer {
	struct cma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct page *cma_pages;
	struct page **pages;
	pgoff_t pagecount;
	int vmap_cnt;
	void *vaddr;
};
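/* Per-attachment state: each importing device gets its own sg_table. */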
struct dma_heap_attachment {
	struct device *dev;
	struct sg_table table;
	struct list_head list;
	bool mapped;
};
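/*
 * attach builds a private sg_table over the buffer's pages so each
 * importer can be DMA-mapped and cache-maintained independently.
 */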
static int cma_heap_attach(struct dma_buf *dmabuf,
			   struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
					buffer->pagecount, 0,
					buffer->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret) {
		kfree(a);
		return ret;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}
static void cma_heap_detach(struct dma_buf *dmabuf,
			    struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(&a->table);
	kfree(a);
}
static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = &a->table;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
	if (ret)
		return ERR_PTR(-ENOMEM);
	a->mapped = true;
	return table;
}
static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *table,
				   enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, 0);
}
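/*
 * begin/end_cpu_access bracket CPU reads/writes of the buffer: they
 * sync every attachment currently mapped to a device, and
 * invalidate/flush any kernel vmap so both views stay coherent.
 */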
static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					     enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}
static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					   enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}
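/*
 * Userspace mappings are populated lazily: cma_heap_mmap() only sets
 * VM_PFNMAP and installs vm_ops; individual pages are inserted by PFN
 * on first touch via the fault handler below.
 */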
static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct cma_heap_buffer *buffer = vma->vm_private_data;

	if (vmf->pgoff >= buffer->pagecount)
		return VM_FAULT_SIGBUS;

	return vmf_insert_pfn(vma, vmf->address, page_to_pfn(buffer->pages[vmf->pgoff]));
}
static const struct vm_operations_struct dma_heap_vm_ops = {
	.fault = cma_heap_vm_fault,
};
static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);

	vma->vm_ops = &dma_heap_vm_ops;
	vma->vm_private_data = buffer;

	return 0;
}
static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
{
	void *vaddr;

	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}
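/*
 * Kernel mappings are refcounted: the first cma_heap_vmap() call builds
 * the mapping, later calls just take another reference under the lock.
 */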
static int cma_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;
	int ret = 0;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		iosys_map_set_vaddr(map, buffer->vaddr);
		goto out;
	}

	vaddr = cma_heap_do_vmap(buffer);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out;
	}
	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
	iosys_map_set_vaddr(map, buffer->vaddr);
out:
	mutex_unlock(&buffer->lock);

	return ret;
}
static void cma_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	iosys_map_clear(map);
}
static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct cma_heap *cma_heap = buffer->heap;

	if (buffer->vmap_cnt > 0) {
		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}

	/* free page list */
	kfree(buffer->pages);
	/* release memory */
	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
	kfree(buffer);
}
static const struct dma_buf_ops cma_heap_buf_ops = {
	.attach = cma_heap_attach,
	.detach = cma_heap_detach,
	.map_dma_buf = cma_heap_map_dma_buf,
	.unmap_dma_buf = cma_heap_unmap_dma_buf,
	.begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = cma_heap_dma_buf_end_cpu_access,
	.mmap = cma_heap_mmap,
	.vmap = cma_heap_vmap,
	.vunmap = cma_heap_vunmap,
	.release = cma_heap_dma_buf_release,
};
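/*
 * A driver consumes a buffer exported here through the standard dma-buf
 * import path. A minimal sketch (error handling elided; `fd` and `dev`
 * are assumed to come from the caller):
 *
 *	struct dma_buf *buf = dma_buf_get(fd);
 *	struct dma_buf_attachment *att = dma_buf_attach(buf, dev);
 *	struct sg_table *sgt = dma_buf_map_attachment(att, DMA_BIDIRECTIONAL);
 *	// program the device with sg_dma_address()/sg_dma_len() ...
 *	dma_buf_unmap_attachment(att, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(buf, att);
 *	dma_buf_put(buf);
 */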
static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
					 unsigned long len,
					 unsigned long fd_flags,
					 unsigned long heap_flags)
{
	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
	struct cma_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	size_t size = PAGE_ALIGN(len);
	pgoff_t pagecount = size >> PAGE_SHIFT;
	unsigned long align = get_order(size);
	struct page *cma_pages;
	struct dma_buf *dmabuf;
	int ret = -ENOMEM;
	pgoff_t pg;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->len = size;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
	if (!cma_pages)
		goto free_buffer;

	/* Clear the cma pages */
	if (PageHighMem(cma_pages)) {
		unsigned long nr_clear_pages = pagecount;
		struct page *page = cma_pages;

		while (nr_clear_pages > 0) {
			void *vaddr = kmap_atomic(page);

			memset(vaddr, 0, PAGE_SIZE);
			kunmap_atomic(vaddr);
			/*
			 * Avoid wasting time zeroing memory if the process
			 * has been killed by SIGKILL.
			 */
			if (fatal_signal_pending(current))
				goto free_cma;
			page++;
			nr_clear_pages--;
		}
	} else {
		memset(page_address(cma_pages), 0, size);
	}

	buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);
	if (!buffer->pages) {
		ret = -ENOMEM;
		goto free_cma;
	}

	for (pg = 0; pg < pagecount; pg++)
		buffer->pages[pg] = &cma_pages[pg];

	buffer->cma_pages = cma_pages;
	buffer->heap = cma_heap;
	buffer->pagecount = pagecount;

	/* create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &cma_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}

	return dmabuf;

free_pages:
	kfree(buffer->pages);
free_cma:
	cma_release(cma_heap->cma, cma_pages, pagecount);
free_buffer:
	kfree(buffer);

	return ERR_PTR(ret);
}
static const struct dma_heap_ops cma_heap_ops = {
	.allocate = cma_heap_allocate,
};
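/*
 * Only the default CMA area (dev_get_cma_area(NULL), typically set up
 * via the "cma=" boot parameter or a "linux,cma-default" reserved-memory
 * node) is exported as a heap; __add_cma_heap() registers it by name.
 */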
static int __add_cma_heap(struct cma *cma, void *data)
{
	struct cma_heap *cma_heap;
	struct dma_heap_export_info exp_info;

	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
	if (!cma_heap)
		return -ENOMEM;
	cma_heap->cma = cma;

	exp_info.name = cma_get_name(cma);
	exp_info.ops = &cma_heap_ops;
	exp_info.priv = cma_heap;

	cma_heap->heap = dma_heap_add(&exp_info);
	if (IS_ERR(cma_heap->heap)) {
		int ret = PTR_ERR(cma_heap->heap);

		kfree(cma_heap);
		return ret;
	}

	return 0;
}
static int add_default_cma_heap(void)
{
	struct cma *default_cma = dev_get_cma_area(NULL);
	int ret = 0;

	if (default_cma)
		ret = __add_cma_heap(default_cma, NULL);

	return ret;
}
module_init(add_default_cma_heap);
MODULE_DESCRIPTION("DMA-BUF CMA Heap");