GNU Linux-libre 4.9.333-gnu1
arch/avr32/mm/dma-coherent.c
/*
 *  Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/addrspace.h>

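/*
 * Sync the data cache for a streaming DMA transfer. Addresses in the
 * uncached P2 segment need no maintenance; for cached addresses the
 * operation depends on the direction of the transfer: invalidate
 * before the device writes memory, clean (write back) before the
 * device reads it, and flush (clean + invalidate) for bidirectional
 * transfers. A driver would typically call, for example,
 *
 *	dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
 *
 * before pointing the device at buf.
 */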
void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
{
        /*
         * No need to sync an uncached area
         */
        if (PXSEG(vaddr) == P2SEG)
                return;

        switch (direction) {
        case DMA_FROM_DEVICE:           /* invalidate only */
                invalidate_dcache_region(vaddr, size);
                break;
        case DMA_TO_DEVICE:             /* writeback only */
                clean_dcache_region(vaddr, size);
                break;
        case DMA_BIDIRECTIONAL:         /* writeback and invalidate */
                flush_dcache_region(vaddr, size);
                break;
        default:
                BUG();
        }
}
EXPORT_SYMBOL(dma_cache_sync);

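/*
 * Allocate a physically contiguous buffer of at least @size bytes and
 * return its first struct page, with the bus address in @handle. The
 * allocation is rounded up to a power-of-two number of pages;
 * split_page() turns it into individual pages so the unused tail can
 * be returned to the page allocator.
 */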
static struct page *__dma_alloc(struct device *dev, size_t size,
                                dma_addr_t *handle, gfp_t gfp)
{
        struct page *page, *free, *end;
        int order;

        /* Following is a work-around (a.k.a. hack) to prevent pages
         * with __GFP_COMP being passed to split_page() which cannot
         * handle them.  The real problem is that this flag probably
         * should be 0 on AVR32 as it is not supported on this
         * platform--see CONFIG_HUGETLB_PAGE. */
        gfp &= ~(__GFP_COMP);

        size = PAGE_ALIGN(size);
        order = get_order(size);

        page = alloc_pages(gfp, order);
        if (!page)
                return NULL;
        split_page(page, order);

        /*
         * When accessing physical memory with valid cache data, we
         * get a cache hit even if the virtual memory region is marked
         * as uncached.
         *
         * Since the memory is newly allocated, there is no point in
         * doing a writeback. If the previous owner cares, it should
         * have flushed the cache before releasing the memory.
         */
        invalidate_dcache_region(phys_to_virt(page_to_phys(page)), size);

        *handle = page_to_bus(page);
        free = page + (size >> PAGE_SHIFT);
        end = page + (1 << order);

        /*
         * Free any unused pages
         */
        while (free < end) {
                __free_page(free);
                free++;
        }

        return page;
}

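/*
 * Free the pages backing a buffer obtained from __dma_alloc(). Only
 * PAGE_ALIGN(size) bytes are freed here; the tail of the original
 * power-of-two allocation was already given back in __dma_alloc().
 */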
static void __dma_free(struct device *dev, size_t size,
                       struct page *page, dma_addr_t handle)
{
        struct page *end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);

        while (page < end)
                __free_page(page++);
}

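/*
 * The address handed back to the driver must bypass the data cache:
 * either a write-combining P3 mapping created with __ioremap(), or
 * the buffer's alias in the uncached P2 segment.
 */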
static void *avr32_dma_alloc(struct device *dev, size_t size,
                dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
        struct page *page;
        dma_addr_t phys;

        page = __dma_alloc(dev, size, handle, gfp);
        if (!page)
                return NULL;
        phys = page_to_phys(page);

        if (attrs & DMA_ATTR_WRITE_COMBINE) {
                /* Now, map the page into P3 with write-combining turned on */
                *handle = phys;
                return __ioremap(phys, size, _PAGE_BUFFER);
        } else {
                return phys_to_uncached(phys);
        }
}

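/*
 * Undo avr32_dma_alloc(): unmap the write-combining mapping if one
 * was created, or translate the uncached P2 address back to its
 * cached alias, then release the underlying pages.
 */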
static void avr32_dma_free(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t handle, unsigned long attrs)
{
        struct page *page;

        if (attrs & DMA_ATTR_WRITE_COMBINE) {
                iounmap(cpu_addr);

                page = phys_to_page(handle);
        } else {
                void *addr = phys_to_cached(uncached_to_phys(cpu_addr));

                pr_debug("avr32_dma_free addr %p (phys %08lx) size %u\n",
                         cpu_addr, (unsigned long)handle, (unsigned)size);

                BUG_ON(!virt_addr_valid(addr));
                page = virt_to_page(addr);
        }

        __dma_free(dev, size, page, handle);
}

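/*
 * There is no address translation to set up here; mapping a page for
 * streaming DMA is just a cache sync followed by a virtual-to-bus
 * conversion, so this cannot fail.
 */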
static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size,
                enum dma_data_direction direction, unsigned long attrs)
{
        void *cpu_addr = page_address(page) + offset;

        dma_cache_sync(dev, cpu_addr, size, direction);
        return virt_to_bus(cpu_addr);
}

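/*
 * Map a scatterlist entry by entry: fill in each bus address and sync
 * the cache over each segment. All @nents entries are always mapped.
 */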
static int avr32_dma_map_sg(struct device *dev, struct scatterlist *sglist,
                int nents, enum dma_data_direction direction,
                unsigned long attrs)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sglist, sg, nents, i) {
                char *virt;

                sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
                virt = sg_virt(sg);
                dma_cache_sync(dev, virt, sg->length, direction);
        }

        return nents;
}

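/*
 * Hand a mapped buffer back to the device after the CPU has touched
 * it, by repeating the cache maintenance for the given direction.
 */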
static void avr32_dma_sync_single_for_device(struct device *dev,
                dma_addr_t dma_handle, size_t size,
                enum dma_data_direction direction)
{
        dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
}

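/*
 * As avr32_dma_sync_single_for_device(), but applied to every entry
 * of a mapped scatterlist.
 */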
static void avr32_dma_sync_sg_for_device(struct device *dev,
                struct scatterlist *sglist, int nents,
                enum dma_data_direction direction)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sglist, sg, nents, i)
                dma_cache_sync(dev, sg_virt(sg), sg->length, direction);
}

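/*
 * No sync_*_for_cpu hooks are provided; presumably the invalidate
 * performed when mapping or syncing for the device is considered
 * sufficient for the CPU to see the device's writes on this core.
 */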
struct dma_map_ops avr32_dma_ops = {
        .alloc                  = avr32_dma_alloc,
        .free                   = avr32_dma_free,
        .map_page               = avr32_dma_map_page,
        .map_sg                 = avr32_dma_map_sg,
        .sync_single_for_device = avr32_dma_sync_single_for_device,
        .sync_sg_for_device     = avr32_dma_sync_sg_for_device,
};
EXPORT_SYMBOL(avr32_dma_ops);