// SPDX-License-Identifier: GPL-2.0
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>

struct dma_coherent_mem {
	void		*virt_base;
	dma_addr_t	device_base;
	unsigned long	pfn_base;
	int		size;
	unsigned long	*bitmap;
	spinlock_t	spinlock;
	bool		use_dev_dma_pfn_offset;
};

static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
	if (dev && dev->dma_mem)
		return dev->dma_mem;
	return NULL;
}

static inline dma_addr_t dma_get_device_base(struct device *dev,
					     struct dma_coherent_mem *mem)
{
	if (mem->use_dev_dma_pfn_offset)
		return phys_to_dma(dev, PFN_PHYS(mem->pfn_base));
	return mem->device_base;
}

static struct dma_coherent_mem *dma_init_coherent_memory(phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size, bool use_dma_pfn_offset)
{
	struct dma_coherent_mem *dma_mem;
	int pages = size >> PAGE_SHIFT;
	void *mem_base;

	if (!size)
		return ERR_PTR(-EINVAL);

	mem_base = memremap(phys_addr, size, MEMREMAP_WC);
	if (!mem_base)
		return ERR_PTR(-EINVAL);

	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem)
		goto out_unmap_membase;
	dma_mem->bitmap = bitmap_zalloc(pages, GFP_KERNEL);
	if (!dma_mem->bitmap)
		goto out_free_dma_mem;

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	dma_mem->use_dev_dma_pfn_offset = use_dma_pfn_offset;
	spin_lock_init(&dma_mem->spinlock);

	return dma_mem;

out_free_dma_mem:
	kfree(dma_mem);
out_unmap_membase:
	memunmap(mem_base);
	pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %zd MiB\n",
	       &phys_addr, size / SZ_1M);
	return ERR_PTR(-ENOMEM);
}

static void _dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
	if (!mem)
		return;

	memunmap(mem->virt_base);
	bitmap_free(mem->bitmap);
	kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
				      struct dma_coherent_mem *mem)
{
	if (!dev)
		return -ENODEV;

	if (dev->dma_mem)
		return -EBUSY;

	dev->dma_mem = mem;
	return 0;
}

/*
 * Declare a region of memory to be handed out by dma_alloc_coherent() when it
 * is asked for coherent memory for this device. This shall only be used
 * from platform code, usually based on the device tree description.
 *
 * phys_addr is the CPU physical address to which the memory is currently
 * assigned (this will be memremapped so the CPU can access the region).
 *
 * device_addr is the DMA address the device needs to be programmed with to
 * actually address this memory (this will be handed out as the dma_addr_t in
 * dma_alloc_coherent()).
 *
 * size is the size of the area (must be a multiple of PAGE_SIZE).
 *
 * As a simplification for the platforms, only *one* such region of memory may
 * be declared per device.
 */
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem;
	int ret;

	mem = dma_init_coherent_memory(phys_addr, device_addr, size, false);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	ret = dma_assign_coherent_memory(dev, mem);
	if (ret)
		_dma_release_coherent_memory(mem);
	return ret;
}

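/*
 * Example (editor's sketch, not part of the upstream file): platform code
 * declaring a per-device pool over a dedicated SRAM window.  The probe
 * function and both addresses are hypothetical, the block is compiled out,
 * and it would additionally need <linux/platform_device.h>.
 */
#if 0
static int example_sram_probe(struct platform_device *pdev)
{
	int ret;

	/* CPU sees the SRAM at 0x90000000; the device addresses it at 0x0. */
	ret = dma_declare_coherent_memory(&pdev->dev, 0x90000000, 0x0, SZ_1M);
	if (ret)
		return ret;

	/*
	 * dma_alloc_coherent(&pdev->dev, ...) is now served from the
	 * declared window instead of the generic allocators.
	 */
	return 0;
}
#endif
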
void dma_release_coherent_memory(struct device *dev)
{
	if (dev) {
		_dma_release_coherent_memory(dev->dma_mem);
		dev->dma_mem = NULL;
	}
}

static void *__dma_alloc_from_coherent(struct device *dev,
				       struct dma_coherent_mem *mem,
				       ssize_t size, dma_addr_t *dma_handle)
{
	int order = get_order(size);
	unsigned long flags;
	int pageno;
	void *ret;

	spin_lock_irqsave(&mem->spinlock, flags);

	if (unlikely(size > ((dma_addr_t)mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the coherent area.
	 */
	*dma_handle = dma_get_device_base(dev, mem) +
			((dma_addr_t)pageno << PAGE_SHIFT);
	ret = mem->virt_base + ((dma_addr_t)pageno << PAGE_SHIFT);
	spin_unlock_irqrestore(&mem->spinlock, flags);
	memset(ret, 0, size);
	return ret;
err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	return NULL;
}

/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address
 *		of the allocated area.
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	if (!mem)
		return 0;

	*ret = __dma_alloc_from_coherent(dev, mem, size, dma_handle);
	return 1;
}

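/*
 * For reference, the dispatch in the generic allocation path looks roughly
 * like this (editor's sketch of the caller, which lives outside this file):
 *
 *	void *cpu_addr;
 *
 *	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
 *		return cpu_addr;
 *
 * A non-zero return means the device owns a pool and the result is final:
 * cpu_addr may still be NULL if the pool is exhausted, and the caller must
 * not fall back to the generic allocators in that case.
 */
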
static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
				       int order, void *vaddr)
{
	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long flags;

		spin_lock_irqsave(&mem->spinlock, flags);
		bitmap_release_region(mem->bitmap, page, order);
		spin_unlock_irqrestore(&mem->spinlock, flags);
		return 1;
	}
	return 0;
}

/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_release_from_coherent(mem, order, vaddr);
}

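/*
 * The matching free-side dispatch in a caller would look like (again an
 * editor's sketch, not code from this file):
 *
 *	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
 *		return;
 *
 * falling through to the generic release path only when the buffer did not
 * come from the per-device pool.
 */
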
static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long user_count = vma_pages(vma);
		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}

/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev:	device from which the memory was allocated
 * @vma:	vm_area for the userspace memory
 * @vaddr:	cpu address returned by dma_alloc_from_dev_coherent
 * @size:	size of the memory buffer allocated
 * @ret:	result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if @vaddr belongs to the device coherent pool and the caller
 * should return @ret, or 0 if they should proceed with mapping memory from
 * generic areas.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}

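/*
 * And the mmap-side dispatch, as an arch or common mmap helper might use it
 * (sketch under the same assumptions as the allocation example above):
 *
 *	int ret;
 *
 *	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 *		return ret;
 *
 * where a return of 1 means the buffer belongs to the per-device pool and
 * ret already holds the remap_pfn_range() result.
 */
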
#ifdef CONFIG_DMA_GLOBAL_POOL
static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;

void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
				     dma_addr_t *dma_handle)
{
	if (!dma_coherent_default_memory)
		return NULL;

	return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size,
			dma_handle);
}

int dma_release_from_global_coherent(int order, void *vaddr)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_release_from_coherent(dma_coherent_default_memory, order,
			vaddr);
}

int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
				   size_t size, int *ret)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
					vaddr, size, ret);
}

int dma_init_global_coherent(phys_addr_t phys_addr, size_t size)
{
	struct dma_coherent_mem *mem;

	mem = dma_init_coherent_memory(phys_addr, phys_addr, size, true);
	if (IS_ERR(mem))
		return PTR_ERR(mem);
	dma_coherent_default_memory = mem;
	pr_info("DMA: default coherent area is set\n");
	return 0;
}
#endif /* CONFIG_DMA_GLOBAL_POOL */

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

#ifdef CONFIG_DMA_GLOBAL_POOL
static struct reserved_mem *dma_reserved_default_memory __initdata;
#endif

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	if (!rmem->priv) {
		struct dma_coherent_mem *mem;

		mem = dma_init_coherent_memory(rmem->base, rmem->base,
					       rmem->size, true);
		if (IS_ERR(mem))
			return PTR_ERR(mem);
		rmem->priv = mem;
	}
	dma_assign_coherent_memory(dev, rmem->priv);
	return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	if (dev)
		dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init	= rmem_dma_device_init,
	.device_release	= rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

#ifdef CONFIG_ARM
	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}
#endif

#ifdef CONFIG_DMA_GLOBAL_POOL
	if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
		WARN(dma_reserved_default_memory,
		     "Reserved memory: region for default DMA coherent area is redefined\n");
		dma_reserved_default_memory = rmem;
	}
#endif

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}

#ifdef CONFIG_DMA_GLOBAL_POOL
static int __init dma_init_reserved_memory(void)
{
	if (!dma_reserved_default_memory)
		return -ENOMEM;
	return dma_init_global_coherent(dma_reserved_default_memory->base,
					dma_reserved_default_memory->size);
}
core_initcall(dma_init_reserved_memory);
#endif /* CONFIG_DMA_GLOBAL_POOL */

RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
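
/*
 * Device-tree example (editor's sketch with made-up addresses) of a region
 * that rmem_dma_setup() above picks up; a device binds to the pool through
 * its "memory-region" property:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		dma_pool: dma-pool@90000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x90000000 0x100000>;
 *			no-map;
 *		};
 *	};
 *
 *	foo@a0000000 {
 *		...
 *		memory-region = <&dma_pool>;
 *	};
 *
 * Adding the "linux,dma-default" property to the pool node instead routes it
 * to the CONFIG_DMA_GLOBAL_POOL default-pool path above.
 */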