/*
 * Meta version derived from arch/powerpc/lib/dma-noncoherent.c
 *   Copyright (C) 2008 Imagination Technologies Ltd.
 *
 * PowerPC version derived from arch/arm/mm/consistent.c
 *   Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 *
 * Copyright (C) 2000 Russell King
 *
 * Consistent memory allocators. Used for DMA devices that want to
 * share uncached memory with the processor core. The function return
 * is the virtual address and 'dma_handle' is the physical address.
 * Mostly stolen from the ARM port, with some changes for PowerPC.
 *
 * Reorganized to get rid of the arch-specific consistent_* functions
 * and provide non-coherent implementations for the DMA API. -Matt
 *
 * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
 * implementation. This is pulled straight from ARM and barely
 * modified. -Matt
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include <asm/tlbflush.h>
#include <asm/mmu.h>		/* mmu_get_base() */
#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_START) \
				 >> PAGE_SHIFT)
static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = ~0ULL;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}
	}

	return mask;
}

/*
 * This is the page table (2MB) covering uncached, DMA consistent allocations
 */
static pte_t *consistent_pte;
static DEFINE_SPINLOCK(consistent_lock);
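
/*
 * Illustrative helper (a sketch, not part of the original allocator): a
 * virtual address inside the consistent window maps to its slot in
 * consistent_pte via CONSISTENT_OFFSET(), which is how the alloc and free
 * paths below walk this page table.
 */
static inline pte_t *consistent_pte_slot(unsigned long vaddr)
{
	/* vaddr is assumed to lie in [CONSISTENT_START, CONSISTENT_END). */
	return consistent_pte + CONSISTENT_OFFSET(vaddr);
}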
/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct metag_vm_region	region;
 *    unsigned long		flags;
 *    struct page		**pages;
 *    unsigned int		nr_pages;
 *    unsigned long		phys_addr;
 *  };
 *
 * get_vm_area() would then call metag_vm_region_alloc with an appropriate
 * struct metag_vm_region head (eg):
 *
 *  struct metag_vm_region vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.) I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling
 * metag_vm_region_alloc().
 */
struct metag_vm_region {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
	struct page		*vm_pages;
	int			vm_active;
};

static struct metag_vm_region consistent_head = {
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_START,
	.vm_end		= CONSISTENT_END,
};
static struct metag_vm_region *metag_vm_region_alloc(struct metag_vm_region
						      *head, size_t size,
						      gfp_t gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct metag_vm_region *c, *new;

	new = kmalloc(sizeof(struct metag_vm_region), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&consistent_lock, flags);

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;
	new->vm_active = 1;

	spin_unlock_irqrestore(&consistent_lock, flags);
	return new;

nospc:
	spin_unlock_irqrestore(&consistent_lock, flags);
	kfree(new);
out:
	return NULL;
}

static struct metag_vm_region *metag_vm_region_find(struct metag_vm_region
						     *head, unsigned long addr)
{
	struct metag_vm_region *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_active && c->vm_start == addr)
			goto out;
	}
	c = NULL;
out:
	return c;
}
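
/*
 * Illustrative call pattern only (a sketch; the real callers are
 * metag_dma_alloc() and metag_dma_free() below): carve a window out of a
 * region head, then look it up again by its start address under
 * consistent_lock.
 */
static inline void metag_vm_region_example(size_t size)
{
	struct metag_vm_region *c;
	unsigned long flags;

	c = metag_vm_region_alloc(&consistent_head, PAGE_ALIGN(size),
				  GFP_KERNEL);
	if (!c)
		return;

	spin_lock_irqsave(&consistent_lock, flags);
	c = metag_vm_region_find(&consistent_head, c->vm_start);
	spin_unlock_irqrestore(&consistent_lock, flags);
}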
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
static void *metag_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	struct page *page;
	struct metag_vm_region *c;
	unsigned long order;
	u64 mask = get_coherent_dma_mask(dev);
	u64 limit;

	if (!consistent_pte) {
		pr_err("%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	if (!mask)
		goto no_page;

	size = PAGE_ALIGN(size);
	limit = (mask + 1) & ~mask;
	if ((limit && size >= limit)
	    || size >= (CONSISTENT_END - CONSISTENT_START)) {
		pr_warn("coherent allocation too big (requested %#x mask %#Lx)\n",
			size, mask);
		return NULL;
	}

	order = get_order(size);

	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		void *kaddr = page_address(page);
		memset(kaddr, 0, size);
		flush_dcache_region(kaddr, size);
	}

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = metag_vm_region_alloc(&consistent_head, size,
				  gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		unsigned long vaddr = c->vm_start;
		pte_t *pte = consistent_pte + CONSISTENT_OFFSET(vaddr);
		struct page *end = page + (1 << order);

		c->vm_pages = page;
		split_page(page, order);

		/*
		 * Set the "dma handle"
		 */
		*handle = page_to_bus(page);

		do {
			BUG_ON(!pte_none(*pte));

			SetPageReserved(page);
			set_pte_at(&init_mm, vaddr,
				   pte, mk_pte(page,
					       pgprot_writecombine
					       (PAGE_KERNEL)));
			page++;
			pte++;
			vaddr += PAGE_SIZE;
		} while (size -= PAGE_SIZE);

		/*
		 * Free the otherwise unused pages.
		 */
		while (page < end) {
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}

	if (page)
		__free_pages(page, order);
no_page:
	return NULL;
}
/*
 * free a page as defined by the above mapping.
 */
static void metag_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	struct metag_vm_region *c;
	unsigned long flags, addr;
	pte_t *ptep;

	size = PAGE_ALIGN(size);

	spin_lock_irqsave(&consistent_lock, flags);

	c = metag_vm_region_find(&consistent_head, (unsigned long)vaddr);
	if (!c)
		goto no_area;

	c->vm_active = 0;
	if ((c->vm_end - c->vm_start) != size) {
		pr_err("%s: freeing wrong coherent size (%ld != %d)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
		unsigned long pfn;

		ptep++;
		addr += PAGE_SIZE;

		if (!pte_none(pte) && pte_present(pte)) {
			pfn = pte_pfn(pte);

			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);
				__free_reserved_page(page);
				continue;
			}
		}

		pr_crit("%s: bad page in kernel page table\n",
			__func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	list_del(&c->vm_list);

	spin_unlock_irqrestore(&consistent_lock, flags);

	kfree(c);
	return;

no_area:
	spin_unlock_irqrestore(&consistent_lock, flags);
	pr_err("%s: trying to free invalid coherent area: %p\n",
	       __func__, vaddr);
	dump_stack();
}
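
/*
 * Usage sketch (illustrative only): drivers reach metag_dma_alloc() and
 * metag_dma_free() through the generic DMA API via the metag_dma_ops table
 * at the bottom of this file. The device is given the bus address written
 * to *handle, while the CPU uses the uncached remapped pointer returned.
 */
static inline void *metag_coherent_alloc_example(struct device *dev,
						 size_t size,
						 dma_addr_t *handle)
{
	return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
}

static inline void metag_coherent_free_example(struct device *dev, size_t size,
					       void *cpu_addr,
					       dma_addr_t handle)
{
	dma_free_coherent(dev, size, cpu_addr, handle);
}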
static int metag_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long flags, user_size, kern_size;
	struct metag_vm_region *c;
	int ret = -ENXIO;

	if (attrs & DMA_ATTR_WRITE_COMBINE)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	spin_lock_irqsave(&consistent_lock, flags);
	c = metag_vm_region_find(&consistent_head, (unsigned long)cpu_addr);
	spin_unlock_irqrestore(&consistent_lock, flags);

	if (c) {
		unsigned long off = vma->vm_pgoff;

		kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

		if (off < kern_size &&
		    user_size <= (kern_size - off)) {
			ret = remap_pfn_range(vma, vma->vm_start,
					      page_to_pfn(c->vm_pages) + off,
					      user_size << PAGE_SHIFT,
					      vma->vm_page_prot);
		}
	}

	return ret;
}
/*
 * Initialise the consistent memory allocation.
 */
static int __init dma_alloc_init(void)
{
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;
	pte_t *pte;
	int ret = 0;

	do {
		int offset = pgd_index(CONSISTENT_START);
		pgd = pgd_offset(&init_mm, CONSISTENT_START);
		pud = pud_alloc(&init_mm, pgd, CONSISTENT_START);
		pmd = pmd_alloc(&init_mm, pud, CONSISTENT_START);
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(pmd, CONSISTENT_START);
		if (!pte) {
			pr_err("%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		pgd_k = ((pgd_t *) mmu_get_base()) + offset;
		pud_k = pud_offset(pgd_k, CONSISTENT_START);
		pmd_k = pmd_offset(pud_k, CONSISTENT_START);
		set_pmd(pmd_k, *pmd);

		consistent_pte = pte;
	} while (0);

	return ret;
}
early_initcall(dma_alloc_init);
/*
 * make an area consistent to devices.
 */
static void dma_sync_for_device(void *vaddr, size_t size, int dma_direction)
{
	/*
	 * Ensure any writes get through the write combiner. This is necessary
	 * even with DMA_FROM_DEVICE, or the write may dirty the cache after
	 * we've invalidated it and get written back during the DMA.
	 */
	wr_fence();

	switch (dma_direction) {
	case DMA_BIDIRECTIONAL:
		/*
		 * Writeback to ensure the device can see our latest changes
		 * and so that we have no dirty lines, and invalidate the cache
		 * lines too in preparation for receiving the buffer back
		 * (dma_sync_for_cpu) later.
		 */
		flush_dcache_region(vaddr, size);
		break;
	case DMA_TO_DEVICE:
		/*
		 * Writeback to ensure the device can see our latest changes.
		 * There's no need to invalidate as the device shouldn't write
		 * to the buffer.
		 */
		writeback_dcache_region(vaddr, size);
		break;
	case DMA_FROM_DEVICE:
		/*
		 * Invalidate to ensure we have no dirty lines that could get
		 * written back during the DMA. It's also safe to flush
		 * (writeback) here if necessary.
		 */
		invalidate_dcache_region(vaddr, size);
		break;
	case DMA_NONE:
		BUG();
	}
}
/*
 * make an area consistent to the core.
 */
static void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction)
{
	/*
	 * Hardware L2 cache prefetch doesn't occur across 4K physical
	 * boundaries, however according to Documentation/DMA-API-HOWTO.txt
	 * kmalloc'd memory is DMA'able, so accesses in nearby memory could
	 * trigger a cache fill in the DMA buffer.
	 *
	 * This should never cause dirty lines, so a flush or invalidate should
	 * be safe to allow us to see data from the device.
	 */
	if (_meta_l2c_pf_is_enabled()) {
		switch (dma_direction) {
		case DMA_BIDIRECTIONAL:
		case DMA_FROM_DEVICE:
			invalidate_dcache_region(vaddr, size);
			break;
		case DMA_TO_DEVICE:
			/* The device shouldn't have written to the buffer */
			break;
		case DMA_NONE:
			BUG();
		}
	}
}
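
/*
 * Usage sketch (illustrative only): the two sync helpers above are what a
 * streaming mapping ends up calling through the map/unmap callbacks defined
 * below. A driver receiving data from a device would typically do:
 */
static inline int metag_streaming_rx_example(struct device *dev, void *buf,
					     size_t len)
{
	dma_addr_t addr;

	/* Maps the buffer; dma_sync_for_device() invalidates it for DMA. */
	addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... start the transfer and wait for it to complete ... */

	/* Unmaps the buffer; dma_sync_for_cpu() drops any prefetched lines. */
	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
	return 0;
}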
static dma_addr_t metag_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction direction, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_sync_for_device((void *)(page_to_phys(page) + offset),
				    size, direction);
	return page_to_phys(page) + offset;
}

static void metag_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
		size_t size, enum dma_data_direction direction,
		unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
}
static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction direction,
		unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nents, i) {
		BUG_ON(!sg_page(sg));

		sg->dma_address = sg_phys(sg);

		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		dma_sync_for_device(sg_virt(sg), sg->length, direction);
	}

	return nents;
}

static void metag_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
		int nhwentries, enum dma_data_direction direction,
		unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nhwentries, i) {
		BUG_ON(!sg_page(sg));

		sg->dma_address = sg_phys(sg);

		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
	}
}
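
/*
 * Usage sketch (illustrative only): scatter-gather transfers go through
 * metag_dma_map_sg()/metag_dma_unmap_sg() above, which sync each element
 * individually.
 */
static inline void metag_sg_tx_example(struct device *dev, void *buf,
				       size_t len)
{
	struct scatterlist sg;
	int count;

	sg_init_one(&sg, buf, len);

	count = dma_map_sg(dev, &sg, 1, DMA_TO_DEVICE);
	if (!count)
		return;

	/* ... hand sg_dma_address(&sg) and sg_dma_len(&sg) to the device ... */

	dma_unmap_sg(dev, &sg, 1, DMA_TO_DEVICE);
}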
static void metag_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
}

static void metag_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
}

static void metag_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sglist, int nelems,
		enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
}

static void metag_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sglist, int nelems,
		enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		dma_sync_for_device(sg_virt(sg), sg->length, direction);
}
const struct dma_map_ops metag_dma_ops = {
	.alloc			= metag_dma_alloc,
	.free			= metag_dma_free,
	.map_page		= metag_dma_map_page,
	.unmap_page		= metag_dma_unmap_page,
	.map_sg			= metag_dma_map_sg,
	.unmap_sg		= metag_dma_unmap_sg,
	.sync_single_for_device	= metag_dma_sync_single_for_device,
	.sync_single_for_cpu	= metag_dma_sync_single_for_cpu,
	.sync_sg_for_device	= metag_dma_sync_sg_for_device,
	.sync_sg_for_cpu	= metag_dma_sync_sg_for_cpu,
	.mmap			= metag_dma_mmap,
};
EXPORT_SYMBOL(metag_dma_ops);
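
/*
 * Note: drivers do not call metag_dma_ops directly. The architecture's
 * <asm/dma-mapping.h> is expected to hand this table back from
 * get_arch_dma_ops(), roughly along these lines (a sketch of the arch glue,
 * which lives outside this file):
 *
 *	static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 *	{
 *		return &metag_dma_ops;
 *	}
 *
 * so that dma_alloc_coherent(), dma_map_single() and friends dispatch to the
 * callbacks defined above.
 */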