GNU Linux-libre 4.14.302-gnu1
arch/mips/mm/dma-default.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/dma-contiguous.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/io.h>

#include <dma-coherence.h>

#if defined(CONFIG_DMA_MAYBE_COHERENT) && !defined(CONFIG_DMA_PERDEV_COHERENT)
/* User-defined DMA coherency from the command line. */
enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
EXPORT_SYMBOL_GPL(coherentio);
int hw_coherentio = 0;  /* Actual hardware-supported DMA coherency setting. */

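/*
 * Boot-time selection (illustrative): on kernels built with
 * CONFIG_DMA_MAYBE_COHERENT, passing "coherentio" on the kernel command
 * line selects hardware-maintained DMA coherency and "nocoherentio"
 * selects software-maintained coherency, overriding the platform default
 * on platforms that consult 'coherentio'.
 */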
static int __init setcoherentio(char *str)
{
        coherentio = IO_COHERENCE_ENABLED;
        pr_info("Hardware DMA cache coherency (command line)\n");
        return 0;
}
early_param("coherentio", setcoherentio);

static int __init setnocoherentio(char *str)
{
        coherentio = IO_COHERENCE_DISABLED;
        pr_info("Software DMA cache coherency (command line)\n");
        return 0;
}
early_param("nocoherentio", setnocoherentio);
#endif

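/*
 * Translate a device-visible DMA address back to the struct page backing
 * it, using the platform's plat_dma_addr_to_phys() conversion.
 */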
static inline struct page *dma_addr_to_page(struct device *dev,
        dma_addr_t dma_addr)
{
        return pfn_to_page(
                plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
}

/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
 * speculatively fill random cachelines with stale data at any time,
 * requiring an extra flush post-DMA.
 *
 * A warning on terminology: Linux calls an uncached area coherent, while
 * MIPS terminology calls memory areas with hardware-maintained coherency
 * coherent.
 *
 * Note that the R14000 and R16000 should also be checked for in this
 * condition.  However, this function is only called on non-I/O-coherent
 * systems, and only the R10000 and R12000 are used in such systems,
 * namely the SGI IP28 (Indigo²) and the SGI IP32 (O2).
 */
static inline bool cpu_needs_post_dma_flush(struct device *dev)
{
        if (plat_device_is_coherent(dev))
                return false;

        switch (boot_cpu_type()) {
        case CPU_R10000:
        case CPU_R12000:
        case CPU_BMIPS5000:
                return true;

        default:
                /*
                 * Presence of MAARs suggests that the CPU supports
                 * speculatively prefetching data, and therefore requires
                 * the post-DMA flush/invalidate.
                 */
                return cpu_has_maar;
        }
}

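/*
 * Pick the GFP zone that satisfies the device's coherent DMA mask: the
 * caller's zone specifiers are discarded and __GFP_DMA/__GFP_DMA32 are
 * re-derived from dev->coherent_dma_mask and the zones configured into
 * the kernel.  For example, with both ZONE_DMA and ZONE_DMA32 configured,
 * a device with a 32-bit coherent mask ends up allocating from ZONE_DMA32.
 * __GFP_NORETRY is added so a failing allocation does not invoke the
 * OOM killer.
 */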
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
        gfp_t dma_flag;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ISA
        if (dev == NULL)
                dma_flag = __GFP_DMA;
        else
#endif
#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
             if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(32))
                        dma_flag = __GFP_DMA;
        else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
                        dma_flag = __GFP_DMA32;
        else
#endif
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
             if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(64))
                dma_flag = __GFP_DMA32;
        else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
             if (dev == NULL ||
                 dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
                dma_flag = __GFP_DMA;
        else
#endif
                dma_flag = 0;

        /* Don't invoke OOM killer */
        gfp |= __GFP_NORETRY;

        return gfp | dma_flag;
}

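/*
 * Allocate a coherent buffer: try the CMA area first when blocking is
 * allowed, fall back to alloc_pages(), zero the memory and establish the
 * device-visible address via plat_map_dma_mem().  On noncoherent devices
 * (and unless DMA_ATTR_NON_CONSISTENT is requested) the buffer is written
 * back/invalidated and an uncached alias of the kernel address is returned.
 *
 * Drivers normally reach this through the generic DMA API, e.g.
 * (illustrative only):
 *
 *	dma_addr_t handle;
 *	void *cpu = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
 */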
static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        void *ret;
        struct page *page = NULL;
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

        gfp = massage_gfp_flags(dev, gfp);

        if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
                page = dma_alloc_from_contiguous(dev, count, get_order(size),
                                                 gfp);
        if (!page)
                page = alloc_pages(gfp, get_order(size));

        if (!page)
                return NULL;

        ret = page_address(page);
        memset(ret, 0, size);
        *dma_handle = plat_map_dma_mem(dev, ret, size);
        if (!(attrs & DMA_ATTR_NON_CONSISTENT) &&
            !plat_device_is_coherent(dev)) {
                dma_cache_wback_inv((unsigned long) ret, size);
                ret = UNCAC_ADDR(ret);
        }

        return ret;
}

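/*
 * Free a buffer obtained from mips_dma_alloc_coherent(): convert an
 * uncached alias back to its cached address before looking up the page,
 * then return the pages to CMA or the page allocator.
 */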
static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle, unsigned long attrs)
{
        unsigned long addr = (unsigned long) vaddr;
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct page *page = NULL;

        plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

        if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !plat_device_is_coherent(dev))
                addr = CAC_ADDR(addr);

        page = virt_to_page((void *) addr);

        if (!dma_release_from_contiguous(dev, page, count))
                __free_pages(page, get_order(size));
}

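/*
 * Map a coherent allocation into user space.  The user mapping is made
 * uncached (or write-combined with DMA_ATTR_WRITE_COMBINE) and is backed
 * by the same physical pages, with per-device coherent memory handled
 * first via dma_mmap_from_dev_coherent().
 */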
static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
        void *cpu_addr, dma_addr_t dma_addr, size_t size,
        unsigned long attrs)
{
        unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long addr = (unsigned long)cpu_addr;
        unsigned long off = vma->vm_pgoff;
        unsigned long pfn;
        int ret = -ENXIO;

        if (!plat_device_is_coherent(dev))
                addr = CAC_ADDR(addr);

        pfn = page_to_pfn(virt_to_page((void *)addr));

        if (attrs & DMA_ATTR_WRITE_COMBINE)
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        else
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (off < count && user_count <= (count - off)) {
                ret = remap_pfn_range(vma, vma->vm_start,
                                      pfn + off,
                                      user_count << PAGE_SHIFT,
                                      vma->vm_page_prot);
        }

        return ret;
}

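/*
 * Perform the cache maintenance matching the transfer direction on a
 * kernel virtual range: writeback for DMA_TO_DEVICE, invalidate for
 * DMA_FROM_DEVICE, and writeback+invalidate for DMA_BIDIRECTIONAL.
 */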
static inline void __dma_sync_virtual(void *addr, size_t size,
        enum dma_data_direction direction)
{
        switch (direction) {
        case DMA_TO_DEVICE:
                dma_cache_wback((unsigned long)addr, size);
                break;

        case DMA_FROM_DEVICE:
                dma_cache_inv((unsigned long)addr, size);
                break;

        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv((unsigned long)addr, size);
                break;

        default:
                BUG();
        }
}

/*
 * A single sg entry may refer to multiple physically contiguous
 * pages. But we still need to process highmem pages individually.
 * If highmem is not configured then the bulk of this loop gets
 * optimized out.
 */
static inline void __dma_sync(struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        size_t left = size;

        do {
                size_t len = left;

                if (PageHighMem(page)) {
                        void *addr;

                        if (offset + len > PAGE_SIZE) {
                                if (offset >= PAGE_SIZE) {
                                        page += offset >> PAGE_SHIFT;
                                        offset &= ~PAGE_MASK;
                                }
                                len = PAGE_SIZE - offset;
                        }

                        addr = kmap_atomic(page);
                        __dma_sync_virtual(addr + offset, len, direction);
                        kunmap_atomic(addr);
                } else
                        __dma_sync_virtual(page_address(page) + offset,
                                           size, direction);
                offset = 0;
                page++;
                left -= len;
        } while (left);
}

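/*
 * Tear down a streaming mapping.  On CPUs that can speculatively fill
 * cachelines (see cpu_needs_post_dma_flush()), the range is invalidated
 * again after the device has written to memory, before the platform
 * mapping is released.
 *
 * A noncoherent driver typically exercises this path through the
 * streaming DMA API, e.g. (illustrative only):
 *
 *	dma_addr_t h = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	... device DMA into buf ...
 *	dma_unmap_single(dev, h, len, DMA_FROM_DEVICE);
 */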
static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
        size_t size, enum dma_data_direction direction, unsigned long attrs)
{
        if (cpu_needs_post_dma_flush(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                __dma_sync(dma_addr_to_page(dev, dma_addr),
                           dma_addr & ~PAGE_MASK, size, direction);
        plat_post_dma_flush(dev);
        plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

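/*
 * Map a scatterlist for streaming DMA: on noncoherent devices each segment
 * is written back/invalidated (unless DMA_ATTR_SKIP_CPU_SYNC) before its
 * dma_address is derived from the backing page and offset.
 */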
static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
        int nents, enum dma_data_direction direction, unsigned long attrs)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sglist, sg, nents, i) {
                if (!plat_device_is_coherent(dev) &&
                    !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                        __dma_sync(sg_page(sg), sg->offset, sg->length,
                                   direction);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
                sg->dma_length = sg->length;
#endif
                sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
                                  sg->offset;
        }

        return nents;
}

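/*
 * Map a single page for streaming DMA, syncing the CPU caches first on
 * noncoherent devices.
 */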
static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction,
        unsigned long attrs)
{
        if (!plat_device_is_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                __dma_sync(page, offset, size, direction);

        return plat_map_dma_mem_page(dev, page) + offset;
}

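/*
 * Unmap a scatterlist.  The CPU-side sync is skipped for DMA_TO_DEVICE
 * transfers, since the device only read from memory and the caches still
 * hold valid data for the buffer.
 */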
static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
        int nhwentries, enum dma_data_direction direction,
        unsigned long attrs)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sglist, sg, nhwentries, i) {
                if (!plat_device_is_coherent(dev) &&
                    !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
                    direction != DMA_TO_DEVICE)
                        __dma_sync(sg_page(sg), sg->offset, sg->length,
                                   direction);
                plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
        }
}

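/*
 * The sync_*_for_cpu handlers only need to act on CPUs that can
 * speculatively refill cachelines (a post-DMA invalidate); the
 * sync_*_for_device handlers perform the direction-appropriate cache
 * maintenance on every noncoherent device before the device touches
 * the buffer again.
 */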
static void mips_dma_sync_single_for_cpu(struct device *dev,
        dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
        if (cpu_needs_post_dma_flush(dev))
                __dma_sync(dma_addr_to_page(dev, dma_handle),
                           dma_handle & ~PAGE_MASK, size, direction);
        plat_post_dma_flush(dev);
}

static void mips_dma_sync_single_for_device(struct device *dev,
        dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
        if (!plat_device_is_coherent(dev))
                __dma_sync(dma_addr_to_page(dev, dma_handle),
                           dma_handle & ~PAGE_MASK, size, direction);
}

static void mips_dma_sync_sg_for_cpu(struct device *dev,
        struct scatterlist *sglist, int nelems,
        enum dma_data_direction direction)
{
        int i;
        struct scatterlist *sg;

        if (cpu_needs_post_dma_flush(dev)) {
                for_each_sg(sglist, sg, nelems, i) {
                        __dma_sync(sg_page(sg), sg->offset, sg->length,
                                   direction);
                }
        }
        plat_post_dma_flush(dev);
}

static void mips_dma_sync_sg_for_device(struct device *dev,
        struct scatterlist *sglist, int nelems,
        enum dma_data_direction direction)
{
        int i;
        struct scatterlist *sg;

        if (!plat_device_is_coherent(dev)) {
                for_each_sg(sglist, sg, nelems, i) {
                        __dma_sync(sg_page(sg), sg->offset, sg->length,
                                   direction);
                }
        }
}

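/*
 * mips_dma_mapping_error() never reports a failure; mips_dma_supported()
 * defers the DMA mask check to the platform.
 */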
static int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return 0;
}

static int mips_dma_supported(struct device *dev, u64 mask)
{
        return plat_dma_supported(dev, mask);
}

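/*
 * Exported helper for drivers that manage memory obtained with
 * DMA_ATTR_NON_CONSISTENT: performs the direction-appropriate cache
 * maintenance on a kernel virtual range for noncoherent devices.
 */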
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                         enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev))
                __dma_sync_virtual(vaddr, size, direction);
}
EXPORT_SYMBOL(dma_cache_sync);

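/*
 * Default DMA primitives wired into the generic DMA API.  Platforms with
 * their own DMA implementation can point mips_dma_map_ops elsewhere.
 */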
static const struct dma_map_ops mips_default_dma_map_ops = {
        .alloc = mips_dma_alloc_coherent,
        .free = mips_dma_free_coherent,
        .mmap = mips_dma_mmap,
        .map_page = mips_dma_map_page,
        .unmap_page = mips_dma_unmap_page,
        .map_sg = mips_dma_map_sg,
        .unmap_sg = mips_dma_unmap_sg,
        .sync_single_for_cpu = mips_dma_sync_single_for_cpu,
        .sync_single_for_device = mips_dma_sync_single_for_device,
        .sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
        .sync_sg_for_device = mips_dma_sync_sg_for_device,
        .mapping_error = mips_dma_mapping_error,
        .dma_supported = mips_dma_supported
};

const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
EXPORT_SYMBOL(mips_dma_map_ops);

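/*
 * Preallocate 65536 dma-debug tracking entries at fs_initcall time; this
 * only has an effect on kernels built with CONFIG_DMA_API_DEBUG, where
 * dma_debug_init() sets up the debugging infrastructure.
 */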
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init mips_dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

        return 0;
}
fs_initcall(mips_dma_init);