/*
 * DMA coherent memory allocation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Copyright (C) 2002 - 2005 Tensilica Inc.
 * Copyright (C) 2015 Cadence Design Systems Inc.
 *
 * Based on version for i386.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-direct.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/platform.h>
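
/*
 * Apply a cache maintenance routine to the physical range
 * [paddr, paddr + size). Lowmem is reached through the kernel direct
 * mapping; highmem pages are mapped one at a time with kmap_atomic().
 */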
static void do_cache_op(phys_addr_t paddr, size_t size,
			void (*fn)(unsigned long, unsigned long))
{
	unsigned long off = paddr & (PAGE_SIZE - 1);
	unsigned long pfn = PFN_DOWN(paddr);
	struct page *page = pfn_to_page(pfn);

	if (!PageHighMem(page))
		fn((unsigned long)phys_to_virt(paddr), size);
	else
		while (size > 0) {
			size_t sz = min_t(size_t, size, PAGE_SIZE - off);
			void *vaddr = kmap_atomic(page);

			fn((unsigned long)vaddr + off, sz);
			kunmap_atomic(vaddr);
			off = 0;
			++page;
			size -= sz;
		}
}
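
/*
 * Hand a DMA buffer back to the CPU: drop any stale cache lines over the
 * range so the CPU sees data written by the device.
 */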
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
			   size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_FROM_DEVICE:
		do_cache_op(paddr, size, __invalidate_dcache_range);
		break;
	default:
		break;
	}
}
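
/*
 * Hand a buffer over to the device: write back dirty cache lines so the
 * device sees the CPU's data (only needed for a writeback data cache).
 */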
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
			      size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_TO_DEVICE:
		if (XCHAL_DCACHE_IS_WRITEBACK)
			do_cache_op(paddr, size, __flush_dcache_range);
		break;
	default:
		break;
	}
}
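
/*
 * Conversions between the cached (KSEG cached) and uncached (KSEG bypass)
 * kernel views of the same physical memory.
 */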
#ifdef CONFIG_MMU
bool platform_vaddr_cached(const void *p)
{
	unsigned long addr = (unsigned long)p;

	return addr >= XCHAL_KSEG_CACHED_VADDR &&
	       addr - XCHAL_KSEG_CACHED_VADDR < XCHAL_KSEG_SIZE;
}

bool platform_vaddr_uncached(const void *p)
{
	unsigned long addr = (unsigned long)p;

	return addr >= XCHAL_KSEG_BYPASS_VADDR &&
	       addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE;
}

void *platform_vaddr_to_uncached(void *p)
{
	return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
}

void *platform_vaddr_to_cached(void *p)
{
	return p + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
}
#else
bool __attribute__((weak)) platform_vaddr_cached(const void *p)
{
	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
	return true;
}

bool __attribute__((weak)) platform_vaddr_uncached(const void *p)
{
	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
	return false;
}

void __attribute__((weak)) *platform_vaddr_to_uncached(void *p)
{
	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
	return p;
}

void __attribute__((weak)) *platform_vaddr_to_cached(void *p)
{
	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
	return p;
}
#endif

/*
 * Note: We assume that the full memory space is always mapped to 'kseg'.
 *       Otherwise we have to use page attributes (not implemented).
 */

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		     gfp_t flag, unsigned long attrs)
{
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = NULL;

	/* ignore region specifiers */

	flag &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		flag |= GFP_DMA;

	if (gfpflags_allow_blocking(flag))
		page = dma_alloc_from_contiguous(dev, count, get_order(size),
						 flag & __GFP_NOWARN);

	if (!page)
		page = alloc_pages(flag, get_order(size));

	if (!page)
		return NULL;

	*handle = phys_to_dma(dev, page_to_phys(page));

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
		return page;
	}

#ifdef CONFIG_MMU
	if (PageHighMem(page)) {
		void *p;

		p = dma_common_contiguous_remap(page, size, VM_MAP,
						pgprot_noncached(PAGE_KERNEL),
						__builtin_return_address(0));
		if (!p) {
			if (!dma_release_from_contiguous(dev, page, count))
				__free_pages(page, get_order(size));
		}
		return p;
	}
#endif
	BUG_ON(!platform_vaddr_cached(page_address(page)));
	__invalidate_dcache_range((unsigned long)page_address(page), size);
	return platform_vaddr_to_uncached(page_address(page));
}
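
/*
 * Counterpart of arch_dma_alloc(): recover the struct page behind whichever
 * mapping was handed out and return it to CMA or the page allocator.
 */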
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page;

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
		page = vaddr;
	} else if (platform_vaddr_uncached(vaddr)) {
		page = virt_to_page(platform_vaddr_to_cached(vaddr));
	} else {
#ifdef CONFIG_MMU
		dma_common_free_remap(vaddr, size, VM_MAP);
#endif
		page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
	}

	if (!dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(size));
}