/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-noncoherent.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

/*
 * ARCH specific callbacks for generic noncoherent DMA ops (dma/noncoherent.c)
 *  - hardware IOC not available (or "dma-coherent" not set for device in DT)
 *  - But still handle both coherent and non-coherent requests from caller
 *
 * For DMA coherent hardware (IOC) the generic code suffices
 */
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned long order = get_order(size);
	struct page *page;
	phys_addr_t paddr;
	void *kvaddr;
	bool need_coh = !(attrs & DMA_ATTR_NON_CONSISTENT);

	/*
	 * __GFP_HIGHMEM flag is cleared by upper layer functions
	 * (in include/linux/dma-mapping.h) so we should never get a
	 * __GFP_HIGHMEM here.
	 */
	BUG_ON(gfp & __GFP_HIGHMEM);

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/* This is linear addr (0x8000_0000 based) */
	paddr = page_to_phys(page);

	*dma_handle = paddr;

	/*
	 * A coherent buffer needs MMU mapping to enforce non-cachability.
	 * kvaddr is kernel Virtual address (0x7000_0000 based).
	 */
	if (need_coh) {
		kvaddr = ioremap_nocache(paddr, size);
		if (kvaddr == NULL) {
			__free_pages(page, order);
			return NULL;
		}
	} else {
		kvaddr = (void *)(u32)paddr;
	}

	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although core does call flush_cache_vmap(), it gets kvaddr hence
	 * can't be used to efficiently flush L1 and/or L2 which need paddr.
	 * Currently flush_cache_vmap nukes the L1 cache completely, which
	 * will be optimized in a separate commit.
	 */
	if (need_coh)
		dma_cache_wback_inv(paddr, size);

	return kvaddr;
}
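
/*
 * Illustrative only, not part of the original file: a minimal sketch of the
 * driver-side call that lands in arch_dma_alloc() above via the generic DMA
 * API. The function name and buffer size are assumptions for the example.
 */
#if 0	/* example usage, not compiled */
static int example_alloc_coherent(struct device *dev)
{
	dma_addr_t dma_handle;
	void *cpu_addr;

	/* Reaches arch_dma_alloc() with DMA_ATTR_NON_CONSISTENT clear,
	 * so the buffer gets an uncached ioremap_nocache() mapping.
	 */
	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... program dma_handle into the device, use cpu_addr from CPU ... */

	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, dma_handle);
	return 0;
}
#endif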

void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	phys_addr_t paddr = dma_handle;
	struct page *page = virt_to_page(paddr);

	if (!(attrs & DMA_ATTR_NON_CONSISTENT))
		iounmap((void __force __iomem *)vaddr);

	__free_pages(page, get_order(size));
}

int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = __phys_to_pfn(dma_addr);
	unsigned long off = vma->vm_pgoff;
	int ret = -ENXIO;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < count && user_count <= (count - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
	}

	return ret;
}
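
/*
 * Illustrative only, not part of the original file: a hedged sketch of a
 * driver mmap path that funnels into arch_dma_mmap() above through the
 * generic dma_mmap_coherent() helper. "struct example_state" and its
 * fields are made up for the example.
 */
#if 0	/* example usage, not compiled */
struct example_state {
	struct device *dev;
	void *cpu_addr;
	dma_addr_t dma_handle;
	size_t size;
};

static int example_mmap(struct example_state *st, struct vm_area_struct *vma)
{
	/* Remaps the coherent buffer into userspace, uncached */
	return dma_mmap_coherent(st->dev, vma, st->cpu_addr, st->dma_handle,
				 st->size);
}
#endif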

/*
 * Cache operations depending on function and direction argument, inspired by
 * https://lkml.org/lkml/2018/5/18/979
 * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
 * dma-mapping: provide a generic dma-noncoherent implementation)"
 *
 *          |   map          ==  for_device     |   unmap     ==  for_cpu
 *          |----------------------------------------------------------------
 * TO_DEV   |   writeback        writeback      |   none          none
 * FROM_DEV |   invalidate       invalidate     |   invalidate*   invalidate*
 * BIDIR    |   writeback+inv    writeback+inv  |   invalidate    invalidate
 *
 *     [*] needed for CPU speculative prefetches
 *
 * NOTE: we don't check the validity of direction argument as it is done in
 * upper layer functions (in include/linux/dma-mapping.h)
 */

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;

	default:
		break;
	}
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;

	/* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_cache_inv(paddr, size);
		break;

	default:
		break;
	}
}
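
/*
 * Illustrative only, not part of the original file: a minimal streaming-DMA
 * sketch showing when the two sync hooks above fire, per the table. The
 * function name, device and buffer are assumptions for the example.
 */
#if 0	/* example usage, not compiled */
static void example_streaming_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma;

	/* map == for_device: FROM_DEVICE => dma_cache_inv() */
	dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma))
		return;

	/* ... device DMAs into the buffer ... */

	/* unmap == for_cpu: FROM_DEVICE => dma_cache_inv() again, to drop
	 * any lines pulled in by speculative prefetch while the DMA ran
	 */
	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
}
#endif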

/*
 * Plug in coherent or noncoherent dma ops
 */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	/*
	 * IOC hardware snoops all DMA traffic keeping the caches consistent
	 * with memory - eliding need for any explicit cache maintenance of
	 * DMA buffers - so we can use dma_direct cache ops.
	 */
	if (is_isa_arcv2() && ioc_enable && coherent) {
		set_dma_ops(dev, &dma_direct_ops);
		dev_info(dev, "use dma_direct_ops cache ops\n");
	} else {
		set_dma_ops(dev, &dma_noncoherent_ops);
		dev_info(dev, "use dma_noncoherent_ops cache ops\n");
	}
}
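
/*
 * Illustrative only, not part of the original file: the "coherent" flag
 * passed to arch_setup_dma_ops() is derived from a "dma-coherent" property
 * on the device node. A hedged device-tree sketch, with node name,
 * compatible string and addresses made up for the example:
 *
 *	ethernet@f0003000 {
 *		compatible = "snps,example-dmac";
 *		reg = <0xf0003000 0x1000>;
 *		dma-coherent;
 *	};
 */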