// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA coherent memory allocation.
 *
 * Copyright (C) 2002 - 2005 Tensilica Inc.
 * Copyright (C) 2015 Cadence Design Systems Inc.
 *
 * Based on version for i386.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-direct.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/platform.h>

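/*
 * Apply a cache maintenance routine @fn to the physical range
 * [@paddr, @paddr + @size). Lowmem is reached through its permanent
 * kernel mapping; highmem pages have no such mapping and are mapped
 * one page at a time with kmap_atomic() before @fn runs on them.
 */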
static void do_cache_op(phys_addr_t paddr, size_t size,
                        void (*fn)(unsigned long, unsigned long))
{
        unsigned long off = paddr & (PAGE_SIZE - 1);
        unsigned long pfn = PFN_DOWN(paddr);
        struct page *page = pfn_to_page(pfn);

        if (!PageHighMem(page))
                fn((unsigned long)phys_to_virt(paddr), size);
        else
                while (size > 0) {
                        size_t sz = min_t(size_t, size, PAGE_SIZE - off);
                        void *vaddr = kmap_atomic(page);

                        fn((unsigned long)vaddr + off, sz);
                        kunmap_atomic(vaddr);
                        off = 0;
                        ++page;
                        size -= sz;
                }
}

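/*
 * Make a range the device may have written visible to the CPU: for
 * DMA_FROM_DEVICE (and DMA_BIDIRECTIONAL) invalidate any stale lines
 * the data cache holds for it, so subsequent CPU reads hit memory.
 * Typically reached through the generic DMA API, e.g. from
 * dma_sync_single_for_cpu() or the unmap path.
 */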
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_BIDIRECTIONAL:
        case DMA_FROM_DEVICE:
                do_cache_op(paddr, size, __invalidate_dcache_range);
                break;

        case DMA_NONE:
                BUG();
                break;

        default:
                break;
        }
}

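/*
 * Push CPU-written data out to memory before the device reads it.
 * The writeback is only needed when the data cache is writeback;
 * with a writethrough cache (XCHAL_DCACHE_IS_WRITEBACK == 0) memory
 * is already up to date and this is a no-op.
 */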
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_BIDIRECTIONAL:
        case DMA_TO_DEVICE:
                if (XCHAL_DCACHE_IS_WRITEBACK)
                        do_cache_op(paddr, size, __flush_dcache_range);
                break;

        case DMA_NONE:
                BUG();
                break;

        default:
                break;
        }
}

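/*
 * With an MMU, low memory is visible through two fixed KSEG aliases:
 * a cached one at XCHAL_KSEG_CACHED_VADDR and an uncached ("bypass")
 * one at XCHAL_KSEG_BYPASS_VADDR. The helpers below classify a kernel
 * virtual address and convert between the two aliases by adding the
 * constant offset between the segments.
 */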
#ifdef CONFIG_MMU
bool platform_vaddr_cached(const void *p)
{
        unsigned long addr = (unsigned long)p;

        return addr >= XCHAL_KSEG_CACHED_VADDR &&
               addr - XCHAL_KSEG_CACHED_VADDR < XCHAL_KSEG_SIZE;
}

bool platform_vaddr_uncached(const void *p)
{
        unsigned long addr = (unsigned long)p;

        return addr >= XCHAL_KSEG_BYPASS_VADDR &&
               addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE;
}

void *platform_vaddr_to_uncached(void *p)
{
        return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
}

void *platform_vaddr_to_cached(void *p)
{
        return p + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
}
#else
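/*
 * Without an MMU the cached/uncached layout is platform specific, so
 * only weak defaults are provided here; platforms are expected to
 * override them, and WARN_ONCE() flags a configuration that has not
 * done so.
 */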
bool __attribute__((weak)) platform_vaddr_cached(const void *p)
{
        WARN_ONCE(1, "Default %s implementation is used\n", __func__);
        return true;
}

bool __attribute__((weak)) platform_vaddr_uncached(const void *p)
{
        WARN_ONCE(1, "Default %s implementation is used\n", __func__);
        return false;
}

void __attribute__((weak)) *platform_vaddr_to_uncached(void *p)
{
        WARN_ONCE(1, "Default %s implementation is used\n", __func__);
        return p;
}

void __attribute__((weak)) *platform_vaddr_to_cached(void *p)
{
        WARN_ONCE(1, "Default %s implementation is used\n", __func__);
        return p;
}
#endif

/*
 * Note: we assume that the full memory space is always mapped to 'kseg';
 *       otherwise we would have to use page attributes (not implemented).
 */

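/*
 * Allocate a DMA-coherent buffer: try CMA first when the caller may
 * block, fall back to the page allocator, and return an uncached view
 * of the pages. Drivers do not call this directly; it backs
 * dma_alloc_coherent(). For illustration only (hypothetical device
 * and size):
 *
 *	dma_addr_t dma;
 *	void *buf = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_4K, buf, dma);
 */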
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
                gfp_t flag, unsigned long attrs)
{
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct page *page = NULL;

        /* ignore region specifiers */

        flag &= ~(__GFP_DMA | __GFP_HIGHMEM);

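        /*
         * If the device cannot reach a full 32-bit space (or there is
         * no device at all), constrain the allocation with GFP_DMA so
         * the buffer lands in memory it can address.
         */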
        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                flag |= GFP_DMA;

        if (gfpflags_allow_blocking(flag))
                page = dma_alloc_from_contiguous(dev, count, get_order(size),
                                                 flag & __GFP_NOWARN);

        if (!page)
                page = alloc_pages(flag | __GFP_ZERO, get_order(size));

        if (!page)
                return NULL;

        *handle = phys_to_dma(dev, page_to_phys(page));

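        /*
         * Highmem pages have no permanent KSEG mapping, so the uncached
         * alias returned below cannot exist for them; remap them into
         * vmalloc space with a non-cached pgprot instead.
         */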
#ifdef CONFIG_MMU
        if (PageHighMem(page)) {
                void *p;

                p = dma_common_contiguous_remap(page, size,
                                                pgprot_noncached(PAGE_KERNEL),
                                                __builtin_return_address(0));
                if (!p) {
                        if (!dma_release_from_contiguous(dev, page, count))
                                __free_pages(page, get_order(size));
                }
                return p;
        }
#endif
        BUG_ON(!platform_vaddr_cached(page_address(page)));
        __invalidate_dcache_range((unsigned long)page_address(page), size);
        return platform_vaddr_to_uncached(page_address(page));
}

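/*
 * Undo arch_dma_alloc(): recover the struct page behind the uncached
 * alias (or tear down the highmem remap), then return the pages to
 * CMA or to the page allocator, whichever they came from.
 */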
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
                dma_addr_t dma_handle, unsigned long attrs)
{
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct page *page;

        if (platform_vaddr_uncached(vaddr)) {
                page = virt_to_page(platform_vaddr_to_cached(vaddr));
        } else {
#ifdef CONFIG_MMU
                dma_common_free_remap(vaddr, size);
#endif
                page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
        }

        if (!dma_release_from_contiguous(dev, page, count))
                __free_pages(page, get_order(size));
}