GNU Linux-libre 4.14.332-gnu1
arch/microblaze/kernel/dma.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/bug.h>

/*
 * DMA is not cache-coherent on this platform: use the uncached
 * consistent_*() allocator and explicit cache maintenance below.
 */
#define NOT_COHERENT_CACHE

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t flag,
                                       unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
        return consistent_alloc(flag, size, dma_handle);
#else
        void *ret;
        struct page *page;
        int node = dev_to_node(dev);

        /* ignore region specifiers */
        flag &= ~(__GFP_HIGHMEM);

        page = alloc_pages_node(node, flag, get_order(size));
        if (page == NULL)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);
        *dma_handle = virt_to_phys(ret);

        return ret;
#endif
}

static void dma_direct_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t dma_handle,
                                     unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
        consistent_free(size, vaddr);
#else
        free_pages((unsigned long)vaddr, get_order(size));
#endif
}
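
/*
 * Illustrative sketch, not part of the upstream file: drivers never call the
 * two helpers above directly.  They go through the generic DMA API, which
 * dispatches to the dma_direct_ops table registered at the bottom of this
 * file.  The example_* names and the PAGE_SIZE-sized buffer are hypothetical.
 */
static void *example_get_coherent_buffer(struct device *dev,
                                         dma_addr_t *dma_handle)
{
        /* Ends up in dma_direct_alloc_coherent() via the dma_map_ops table. */
        return dma_alloc_coherent(dev, PAGE_SIZE, dma_handle, GFP_KERNEL);
}

static void example_put_coherent_buffer(struct device *dev, void *vaddr,
                                        dma_addr_t dma_handle)
{
        /* Ends up in dma_direct_free_coherent(). */
        dma_free_coherent(dev, PAGE_SIZE, vaddr, dma_handle);
}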

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
                             int nents, enum dma_data_direction direction,
                             unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        /* FIXME: this code path is untested */
        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg);

                if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                        continue;

                __dma_sync(sg_phys(sg), sg->length, direction);
        }

        return nents;
}
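
/*
 * Illustrative sketch, not part of the upstream file: one way a driver would
 * reach dma_direct_map_sg() above.  The single-entry scatterlist, buffer and
 * example_* name are hypothetical; error handling is kept to a minimum.
 */
static int example_map_one_sg(struct device *dev, struct scatterlist *sg,
                              void *buf, size_t len)
{
        sg_init_one(sg, buf, len);

        /* Dispatches to dma_direct_map_sg(), which also syncs the caches. */
        if (dma_map_sg(dev, sg, 1, DMA_TO_DEVICE) == 0)
                return -ENOMEM;

        return 0;
}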

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
        return 1;
}

static inline dma_addr_t dma_direct_map_page(struct device *dev,
                                             struct page *page,
                                             unsigned long offset,
                                             size_t size,
                                             enum dma_data_direction direction,
                                             unsigned long attrs)
{
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                __dma_sync(page_to_phys(page) + offset, size, direction);
        return page_to_phys(page) + offset;
}

static inline void dma_direct_unmap_page(struct device *dev,
                                         dma_addr_t dma_address,
                                         size_t size,
                                         enum dma_data_direction direction,
                                         unsigned long attrs)
{
        /*
         * No extra cache cleanup is necessary here beyond the sync below.
         * dma_address is already a physical address, which is what
         * __dma_sync() expects, so no phys_to_virt() conversion is needed.
         */
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                __dma_sync(dma_address, size, direction);
}
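
/*
 * Illustrative sketch, not part of the upstream file: streaming DMA on a
 * kmalloc()ed buffer reaches dma_direct_map_page()/dma_direct_unmap_page()
 * above through dma_map_single()/dma_unmap_single().  The example_* name,
 * buffer and length are hypothetical.
 */
static int example_stream_to_device(struct device *dev, void *buf, size_t len)
{
        dma_addr_t dma;

        /* Flushes the CPU cache for the buffer before the device reads it. */
        dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma))
                return -ENOMEM;

        /* ... start the device transfer using 'dma' here ... */

        dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
        return 0;
}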

static inline void
dma_direct_sync_single_for_cpu(struct device *dev,
                               dma_addr_t dma_handle, size_t size,
                               enum dma_data_direction direction)
{
        /*
         * Writing the cache back is pointless when the buffer is handed
         * to the CPU; only invalidate it if the device may have written
         * new data into it (DMA_FROM_DEVICE).
         */
        if (direction == DMA_FROM_DEVICE)
                __dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_single_for_device(struct device *dev,
                                  dma_addr_t dma_handle, size_t size,
                                  enum dma_data_direction direction)
{
        /*
         * It's pointless to invalidate the cache if the device isn't
         * supposed to write to the relevant region.
         */
        if (direction == DMA_TO_DEVICE)
                __dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_sg_for_cpu(struct device *dev,
                           struct scatterlist *sgl, int nents,
                           enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        /* FIXME: this code path is untested */
        if (direction == DMA_FROM_DEVICE)
                for_each_sg(sgl, sg, nents, i)
                        __dma_sync(sg->dma_address, sg->length, direction);
}

static inline void
dma_direct_sync_sg_for_device(struct device *dev,
                              struct scatterlist *sgl, int nents,
                              enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        /* FIXME: this code path is untested */
        if (direction == DMA_TO_DEVICE)
                for_each_sg(sgl, sg, nents, i)
                        __dma_sync(sg->dma_address, sg->length, direction);
}
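
/*
 * Illustrative sketch, not part of the upstream file: when the CPU needs to
 * look at a buffer that stays mapped for the device, it brackets the access
 * with the sync helpers above.  The example_* name and arguments are
 * hypothetical.
 */
static void example_peek_while_mapped(struct device *dev, dma_addr_t dma,
                                      size_t len)
{
        /* Invalidates stale cache lines so the CPU sees the device's data. */
        dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);

        /* ... inspect the buffer through its CPU mapping here ... */

        /* Hand the buffer back to the device for the next transfer. */
        dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
}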

static
int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
                             void *cpu_addr, dma_addr_t handle, size_t size,
                             unsigned long attrs)
{
#ifdef CONFIG_MMU
        unsigned long user_count = vma_pages(vma);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;
        unsigned long pfn;

        if (off >= count || user_count > (count - off))
                return -ENXIO;

#ifdef NOT_COHERENT_CACHE
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        pfn = consistent_virt_to_pfn(cpu_addr);
#else
        pfn = virt_to_pfn(cpu_addr);
#endif
        return remap_pfn_range(vma, vma->vm_start, pfn + off,
                               vma->vm_end - vma->vm_start, vma->vm_page_prot);
#else
        return -ENXIO;
#endif
}
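
/*
 * Illustrative sketch, not part of the upstream file: a driver's mmap() file
 * operation can hand a coherent buffer to user space with dma_mmap_coherent(),
 * which lands in dma_direct_mmap_coherent() above.  The example_* name and
 * the buffer parameters are hypothetical.
 */
static int example_mmap_coherent_buf(struct device *dev,
                                     struct vm_area_struct *vma,
                                     void *cpu_addr, dma_addr_t handle,
                                     size_t size)
{
        return dma_mmap_coherent(dev, vma, cpu_addr, handle, size);
}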

const struct dma_map_ops dma_direct_ops = {
        .alloc          = dma_direct_alloc_coherent,
        .free           = dma_direct_free_coherent,
        .mmap           = dma_direct_mmap_coherent,
        .map_sg         = dma_direct_map_sg,
        .dma_supported  = dma_direct_dma_supported,
        .map_page       = dma_direct_map_page,
        .unmap_page     = dma_direct_unmap_page,
        .sync_single_for_cpu            = dma_direct_sync_single_for_cpu,
        .sync_single_for_device         = dma_direct_sync_single_for_device,
        .sync_sg_for_cpu                = dma_direct_sync_sg_for_cpu,
        .sync_sg_for_device             = dma_direct_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_direct_ops);
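
/*
 * Illustrative sketch, not part of the upstream file: on MicroBlaze the table
 * above is what the generic DMA API resolves to for a device, assuming the
 * arch's get_arch_dma_ops() returns &dma_direct_ops and no per-device ops are
 * installed.  The example_* name is hypothetical.
 */
static bool example_uses_direct_ops(struct device *dev)
{
        return get_dma_ops(dev) == &dma_direct_ops;
}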

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

        return 0;
}
fs_initcall(dma_init);