// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/dma-coherence.h>
#include <asm/io.h>

#ifdef CONFIG_DMA_PERDEV_COHERENT
static inline int dev_is_coherent(struct device *dev)
{
	return dev->archdata.dma_coherent;
}
#else
static inline int dev_is_coherent(struct device *dev)
{
	switch (coherentio) {
	default:
	case IO_COHERENCE_DEFAULT:
		return hw_coherentio;
	case IO_COHERENCE_ENABLED:
		return 1;
	case IO_COHERENCE_DISABLED:
		return 0;
	}
}
#endif /* CONFIG_DMA_PERDEV_COHERENT */

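/*
 * Editorial note (added): with CONFIG_DMA_PERDEV_COHERENT the decision is
 * made per device, and platform code must set dev->archdata.dma_coherent
 * before the device does any DMA (on MIPS this is normally done through
 * the arch_setup_dma_ops() hook). Otherwise a single global policy
 * applies: the 'coherentio' setting if the user forced one, else whatever
 * the hardware reports via 'hw_coherentio'.
 */
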
/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively
 * fill random cachelines with stale data at any time, requiring an extra
 * flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent; MIPS
 * terminology calls memory areas with hardware maintained coherency coherent.
 *
 * Note that the R14000 and R16000 should also be checked for in this condition.
 * However this function is only called on non-I/O-coherent systems and only the
 * R10000 and R12000 are used in such systems, the SGI IP28 Indigo² rsp.
 * SGI IP32 aka O2.
 */
static inline bool cpu_needs_post_dma_flush(struct device *dev)
{
	if (dev_is_coherent(dev))
		return false;

	switch (boot_cpu_type()) {
	case CPU_R10000:
	case CPU_R12000:
	case CPU_BMIPS5000:
		return true;

	default:
		/*
		 * Presence of MAARs suggests that the CPU supports
		 * speculatively prefetching data, and therefore requires
		 * the post-DMA flush/invalidate.
		 */
		return cpu_has_maar;
	}
}

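/*
 * Editorial illustration (not in the original file): the hazard the
 * post-DMA flush guards against, as a timeline for a DMA_FROM_DEVICE
 * buffer on one of the CPUs above:
 *
 *   1. arch_sync_dma_for_device() invalidates the buffer's cachelines.
 *   2. The device writes the buffer via DMA.
 *   3. Before the driver looks at the data, the CPU speculatively
 *      prefetches one of those lines, caching pre-DMA (stale) contents.
 *   4. Only the second invalidate in arch_sync_dma_for_cpu() makes the
 *      driver's reads see the DMA'd data instead of the stale line.
 */
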
void *arch_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	void *ret;

	ret = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
	if (!ret)
		return NULL;

	if (!dev_is_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
		/* flush the buffer, then hand back its uncached alias */
		dma_cache_wback_inv((unsigned long) ret, size);
		ret = (void *)UNCAC_ADDR(ret);
	}

	return ret;
}

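/*
 * Editorial sketch (not in the original file): what a driver sees when it
 * allocates through this path. The function and variable names below are
 * assumptions for illustration only; the real entry points are the generic
 * dma_alloc_attrs()/dma_free_attrs() from <linux/dma-mapping.h>, which end
 * up in arch_dma_alloc()/arch_dma_free() on MIPS.
 */
#if 0
static int example_coherent_alloc(struct device *dev)
{
	dma_addr_t bus;
	void *cpu = dma_alloc_attrs(dev, PAGE_SIZE, &bus, GFP_KERNEL, 0);

	if (!cpu)
		return -ENOMEM;
	/*
	 * On a non-I/O-coherent system 'cpu' is the UNCAC_ADDR() alias,
	 * so plain stores are visible to the device without flushing.
	 */
	memset(cpu, 0, PAGE_SIZE);
	dma_free_attrs(dev, PAGE_SIZE, cpu, bus, 0);
	return 0;
}
#endif
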
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	/* undo the UNCAC_ADDR() aliasing done at allocation time */
	if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !dev_is_coherent(dev))
		cpu_addr = (void *)CAC_ADDR((unsigned long)cpu_addr);
	dma_direct_free(dev, size, cpu_addr, dma_addr, attrs);
}

int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long addr = (unsigned long)cpu_addr;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;
	int ret = -ENXIO;

	if (!dev_is_coherent(dev))
		addr = CAC_ADDR(addr);

	pfn = page_to_pfn(virt_to_page((void *)addr));

	if (attrs & DMA_ATTR_WRITE_COMBINE)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < count && user_count <= (count - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
	}

	return ret;
}

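/*
 * Editorial sketch (not in the original file): how a driver would reach
 * arch_dma_mmap(), via the generic dma_mmap_attrs() from
 * <linux/dma-mapping.h>. The context struct and its fields are assumptions
 * for illustration only.
 */
#if 0
struct example_buf {
	struct device *dev;
	void *cpu;		/* from dma_alloc_attrs() */
	dma_addr_t bus;
	size_t size;
};

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct example_buf *buf = file->private_data;

	return dma_mmap_attrs(buf->dev, vma, buf->cpu, buf->bus,
			      buf->size, 0);
}
#endif
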
static inline void dma_sync_virt(void *addr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;

	default:
		BUG();
	}
}

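/*
 * Editorial note (added): the direction-to-cache-op mapping above is the
 * classic non-coherent DMA recipe: DMA_TO_DEVICE writes dirty lines back
 * so the device reads current data, DMA_FROM_DEVICE discards cached lines
 * so later CPU reads fetch what the device wrote, and DMA_BIDIRECTIONAL
 * must do both.
 */
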
/*
 * A single sg entry may refer to multiple physically contiguous pages. But
 * we still need to process highmem pages individually. If highmem is not
 * configured then the bulk of this loop gets optimized out.
 */
static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned long offset = paddr & ~PAGE_MASK;
	size_t left = size;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			if (offset + len > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset >> PAGE_SHIFT;
					offset &= ~PAGE_MASK;
				}
				len = PAGE_SIZE - offset;
			}

			/* highmem pages must be mapped one at a time */
			addr = kmap_atomic(page);
			dma_sync_virt(addr + offset, len, dir);
			kunmap_atomic(addr);
		} else
			dma_sync_virt(page_address(page) + offset, size, dir);
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

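/*
 * Editorial illustration (added): for a 10 KiB sync starting 1 KiB into a
 * highmem page, with 4 KiB pages, the loop above issues chunks of 3 KiB,
 * 4 KiB and 3 KiB, kmapping each page in turn; a range starting in lowmem
 * is handled in a single dma_sync_virt() call instead.
 */
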
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	if (!dev_is_coherent(dev))
		dma_sync_phys(paddr, size, dir);
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	if (cpu_needs_post_dma_flush(dev))
		dma_sync_phys(paddr, size, dir);
}

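/*
 * Editorial sketch (not in the original file): when the two hooks above
 * run during streaming DMA. The names are assumptions; the entry points
 * are the generic dma_map_single()/dma_unmap_single() from
 * <linux/dma-mapping.h>.
 */
#if 0
static void example_rx(struct device *dev, void *buf, size_t len)
{
	/* map: ends up in arch_sync_dma_for_device() (cache invalidate) */
	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, bus))
		return;

	/* ... point the device at 'bus' and wait for the DMA ... */

	/* unmap: ends up in arch_sync_dma_for_cpu() (the post-DMA flush) */
	dma_unmap_single(dev, bus, len, DMA_FROM_DEVICE);
	/* only now is it safe for the CPU to read 'buf' */
}
#endif
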
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!dev_is_coherent(dev))
		dma_sync_virt(vaddr, size, direction);
}

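/*
 * Editorial note (added): this last hook backs dma_cache_sync(), which
 * drivers call on buffers allocated with DMA_ATTR_NON_CONSISTENT - the
 * case where arch_dma_alloc() above skipped the switch to an uncached
 * address and left cache maintenance to the driver.
 */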