// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/io.h>

/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively
 * fill random cachelines with stale data at any time, requiring an extra
 * flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent; MIPS
 * terminology calls memory areas with hardware-maintained coherency coherent.
 *
 * Note that the R14000 and R16000 should also be checked for in this condition.
 * However this function is only called on non-I/O-coherent systems and only the
 * R10000 and R12000 are used in such systems, the SGI IP28 Indigo² rsp.
 * SGI IP32 aka O2.
 */
static inline bool cpu_needs_post_dma_flush(void)
{
	switch (boot_cpu_type()) {
	case CPU_R10000:
	case CPU_R12000:
	case CPU_BMIPS5000:
	case CPU_LOONGSON2EF:
	case CPU_XBURST:
		return true;
	default:
		/*
		 * Presence of MAARs suggests that the CPU supports
		 * speculatively prefetching data, and therefore requires
		 * the post-DMA flush/invalidate.
		 */
		return cpu_has_maar;
	}
}
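
/*
 * Prepare a freshly allocated coherent buffer: write back and invalidate
 * the cached alias so that accesses through the uncached mapping set up
 * later see consistent memory.
 */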
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	dma_cache_wback_inv((unsigned long)page_address(page), size);
}
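
/*
 * Map a cached kernel address to its uncached alias by rebasing the
 * physical address into the uncached segment (KSEG1 on 32-bit,
 * uncached XKPHYS on 64-bit).
 */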
void *arch_dma_set_uncached(void *addr, size_t size)
{
	return (void *)(__pa(addr) + UNCAC_BASE);
}
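
/*
 * Cache maintenance before the device accesses memory: write back dirty
 * lines the device must read (DMA_TO_DEVICE), invalidate lines the device
 * will overwrite (DMA_FROM_DEVICE), or both (DMA_BIDIRECTIONAL).
 */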
static inline void dma_sync_virt_for_device(void *addr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;
	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;
	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;
	default:
		BUG();
	}
}
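
/*
 * Cache maintenance after the device is done: DMA_TO_DEVICE needs nothing,
 * while DMA_FROM_DEVICE and DMA_BIDIRECTIONAL must invalidate any lines a
 * speculating CPU may have refilled with stale data during the transfer.
 */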
static inline void dma_sync_virt_for_cpu(void *addr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_cache_inv((unsigned long)addr, size);
		break;
	default:
		BUG();
	}
}

/*
 * A single sg entry may refer to multiple physically contiguous pages. But
 * we still need to process highmem pages individually. If highmem is not
 * configured then the bulk of this loop gets optimized out.
 */
static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir, bool for_device)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned long offset = paddr & ~PAGE_MASK;
	size_t left = size;

	do {
		size_t len = left;
		void *addr;

		/* Highmem pages must be mapped and synced one page at a time. */
		if (PageHighMem(page)) {
			if (offset + len > PAGE_SIZE)
				len = PAGE_SIZE - offset;
		}

		addr = kmap_atomic(page);
		if (for_device)
			dma_sync_virt_for_device(addr + offset, len, dir);
		else
			dma_sync_virt_for_cpu(addr + offset, len, dir);
		kunmap_atomic(addr);

		offset = 0;
		page++;
		left -= len;
	} while (left);
}
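
/*
 * Hook invoked by the generic dma-direct code from dma_map_*() and
 * dma_sync_*_for_device() on non-coherent devices.
 */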
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	dma_sync_phys(paddr, size, dir, true);
}
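
/*
 * On CPUs that cannot speculatively refill cachelines while DMA is in
 * flight, the maintenance done in arch_sync_dma_for_device() is
 * sufficient and the post-DMA sync can be skipped.
 */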
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	if (cpu_needs_post_dma_flush())
		dma_sync_phys(paddr, size, dir, false);
}
#endif
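
/*
 * Record per-device coherence as discovered by the bus code (e.g. a
 * "dma-coherent" devicetree property); the generic DMA layer consults
 * dev->dma_coherent to decide whether any cache maintenance is needed.
 */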
#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent)
{
	dev->dma_coherent = coherent;
}
#endif
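
/*
 * Usage sketch (illustrative, not part of this file): a driver doing
 * streaming DMA on a non-coherent MIPS system, with a hypothetical
 * device 'dev' and buffer 'buf', ends up in the hooks above:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	// device writes into buf, driver waits for completion
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 *
 * dma_map_single() reaches arch_sync_dma_for_device() (a cache invalidate
 * here), and dma_unmap_single() reaches arch_sync_dma_for_cpu(), which
 * performs the post-DMA invalidate on the speculating CPUs described in
 * the comment at the top of this file.
 */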