/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-direct.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <linux/pci.h>

#include <asm/cacheflush.h>

static int swiotlb __ro_after_init;

static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
				 bool coherent)
{
	if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
		return pgprot_writecombine(prot);
	return prot;
}

static struct gen_pool *atomic_pool __ro_after_init;

#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

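/*
 * Example (illustrative, not part of the original file): the default pool
 * size above can be overridden on the kernel command line, e.g. to reserve
 * 1 MiB instead of 256 KiB:
 *
 *	coherent_pool=1M
 *
 * memparse() accepts the usual K/M/G size suffixes.
 */
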
static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
		memset(ptr, 0, size);
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

static void *__dma_alloc(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flags,
			 unsigned long attrs)
{
	struct page *page;
	void *ptr, *coherent_ptr;
	bool coherent = is_device_dma_coherent(dev);
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);

	size = PAGE_ALIGN(size);

	if (!coherent && !gfpflags_allow_blocking(flags)) {
		struct page *page = NULL;
		void *addr = __alloc_from_pool(size, &page, flags);

		if (addr)
			*dma_handle = phys_to_dma(dev, page_to_phys(page));

		return addr;
	}

	ptr = swiotlb_alloc(dev, size, dma_handle, flags, attrs);
	if (!ptr)
		goto no_mem;

	/* no need for non-cacheable mapping if coherent */
	if (coherent)
		return ptr;

	/* remove any dirty cache lines on the kernel alias */
	__dma_flush_area(ptr, size);

	/* create a coherent mapping */
	page = virt_to_page(ptr);
	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   prot, __builtin_return_address(0));
	if (!coherent_ptr)
		goto no_map;

	return coherent_ptr;

no_map:
	swiotlb_free(dev, size, ptr, *dma_handle, attrs);
no_mem:
	return NULL;
}

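/*
 * Example (illustrative sketch, not part of the original file): drivers do
 * not call __dma_alloc()/__dma_free() directly; they go through the generic
 * DMA API, which dispatches to the ops installed for their device:
 *
 *	dma_addr_t dma;
 *	void *cpu = dma_alloc_coherent(dev, SZ_64K, &dma, GFP_KERNEL);
 *
 *	if (!cpu)
 *		return -ENOMEM;
 *	(hand "dma" to the device, access the buffer through "cpu")
 *	dma_free_coherent(dev, SZ_64K, cpu, dma);
 */
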
static void __dma_free(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle,
		       unsigned long attrs)
{
	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

	size = PAGE_ALIGN(size);

	if (!is_device_dma_coherent(dev)) {
		if (__free_from_pool(vaddr, size))
			return;
		vunmap(vaddr);
	}
	swiotlb_free(dev, size, swiotlb_addr, dma_handle, attrs);
}

static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	dma_addr_t dev_addr;

	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

	return dev_addr;
}

static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

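/*
 * Example (illustrative sketch): the map/unmap hooks above back the
 * streaming DMA API. A single-buffer transfer towards a device looks like:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	(device reads "len" bytes from "dma")
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */
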
static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				  int nelems, enum dma_data_direction dir,
				  unsigned long attrs)
{
	struct scatterlist *sg;
	int i, ret;

	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		for_each_sg(sgl, sg, ret, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);

	return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
				     struct scatterlist *sgl, int nelems,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

static void __swiotlb_sync_single_for_cpu(struct device *dev,
					  dma_addr_t dev_addr, size_t size,
					  enum dma_data_direction dir)
{
	if (!is_device_dma_coherent(dev))
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
					     dma_addr_t dev_addr, size_t size,
					     enum dma_data_direction dir)
{
	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
	if (!is_device_dma_coherent(dev))
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

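/*
 * Example (illustrative sketch): for a long-lived streaming mapping, buffer
 * ownership bounces between CPU and device through the sync hooks above
 * rather than by remapping:
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	(CPU may now safely read what the device wrote)
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	(device owns the buffer again)
 */
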
static void __swiotlb_sync_sg_for_cpu(struct device *dev,
				      struct scatterlist *sgl, int nelems,
				      enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
					 struct scatterlist *sgl, int nelems,
					 enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);
}

static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
			      unsigned long pfn, size_t size)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = vma_pages(vma);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

static int __swiotlb_mmap(struct device *dev,
			  struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
			  unsigned long attrs)
{
	int ret;
	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	return __swiotlb_mmap_pfn(vma, pfn, size);
}

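/*
 * Example (illustrative sketch; "foo_dev" and "foo_mmap" are hypothetical):
 * a driver typically reaches __swiotlb_mmap() from its mmap file operation
 * via the generic helper:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *fd = file->private_data;
 *
 *		return dma_mmap_coherent(fd->dev, vma, fd->cpu_addr,
 *					 fd->dma_addr, fd->size);
 *	}
 */
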
static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
				      struct page *page, size_t size)
{
	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);

	return ret;
}

static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t handle, size_t size,
				 unsigned long attrs)
{
	struct page *page = phys_to_page(dma_to_phys(dev, handle));

	return __swiotlb_get_sgtable_page(sgt, page, size);
}

static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	if (swiotlb)
		return swiotlb_dma_supported(hwdev, mask);
	return 1;
}

static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
{
	if (swiotlb)
		return swiotlb_dma_mapping_error(hwdev, addr);
	return 0;
}

static const struct dma_map_ops arm64_swiotlb_dma_ops = {
	.alloc = __dma_alloc,
	.free = __dma_free,
	.mmap = __swiotlb_mmap,
	.get_sgtable = __swiotlb_get_sgtable,
	.map_page = __swiotlb_map_page,
	.unmap_page = __swiotlb_unmap_page,
	.map_sg = __swiotlb_map_sg_attrs,
	.unmap_sg = __swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
	.sync_single_for_device = __swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
	.dma_supported = __swiotlb_dma_supported,
	.mapping_error = __swiotlb_dma_mapping_error,
};

static int __init atomic_pool_init(void)
{
	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	unsigned int pool_size_order = get_order(atomic_pool_size);

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
						 pool_size_order, false);
	else
		page = alloc_pages(GFP_DMA32, pool_size_order);

	if (page) {
		int ret;
		void *page_addr = page_address(page);

		memset(page_addr, 0, atomic_pool_size);
		__dma_flush_area(page_addr, atomic_pool_size);

		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
		if (!atomic_pool)
			goto free_page;

		addr = dma_common_contiguous_remap(page, atomic_pool_size,
					VM_USERMAP, prot, atomic_pool_init);

		if (!addr)
			goto destroy_genpool;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto remove_mapping;

		gen_pool_set_algo(atomic_pool,
				  gen_pool_first_fit_order_align,
				  NULL);

		pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}

	goto out;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}

/********************************************
 * The following APIs are for dummy DMA ops *
 ********************************************/

static void *__dummy_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	return NULL;
}

static void __dummy_free(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma_handle,
			 unsigned long attrs)
{
}

static int __dummy_mmap(struct device *dev,
			struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs)
{
	return -ENXIO;
}

static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	return 0;
}

static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
}

static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nelems, enum dma_data_direction dir,
			  unsigned long attrs)
{
	return 0;
}

static void __dummy_unmap_sg(struct device *dev,
			     struct scatterlist *sgl, int nelems,
			     enum dma_data_direction dir,
			     unsigned long attrs)
{
}

static void __dummy_sync_single(struct device *dev,
				dma_addr_t dev_addr, size_t size,
				enum dma_data_direction dir)
{
}

static void __dummy_sync_sg(struct device *dev,
			    struct scatterlist *sgl, int nelems,
			    enum dma_data_direction dir)
{
}

static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return 1;
}

static int __dummy_dma_supported(struct device *hwdev, u64 mask)
{
	return 0;
}

const struct dma_map_ops dummy_dma_ops = {
	.alloc = __dummy_alloc,
	.free = __dummy_free,
	.mmap = __dummy_mmap,
	.map_page = __dummy_map_page,
	.unmap_page = __dummy_unmap_page,
	.map_sg = __dummy_map_sg,
	.unmap_sg = __dummy_unmap_sg,
	.sync_single_for_cpu = __dummy_sync_single,
	.sync_single_for_device = __dummy_sync_single,
	.sync_sg_for_cpu = __dummy_sync_sg,
	.sync_sg_for_device = __dummy_sync_sg,
	.mapping_error = __dummy_mapping_error,
	.dma_supported = __dummy_dma_supported,
};
EXPORT_SYMBOL(dummy_dma_ops);

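/*
 * Example (illustrative sketch): code that must refuse DMA for a device,
 * e.g. because firmware describes it as not DMA-capable, can install these
 * ops; every subsequent mapping attempt then fails cleanly:
 *
 *	set_dma_ops(dev, &dummy_dma_ops);
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))	(always true with dummy ops)
 *		return -EIO;
 */
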
static int __init arm64_dma_init(void)
{
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
		swiotlb = 1;

	WARN_TAINT(ARCH_DMA_MINALIGN < cache_line_size(),
		   TAINT_CPU_OUT_OF_SPEC,
		   "ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)",
		   ARCH_DMA_MINALIGN, cache_line_size());

	return atomic_pool_init();
}
arch_initcall(arm64_dma_init);

#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-iommu.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>

/* Thankfully, all cache ops are by VA so we can ignore phys here */
static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
{
	__dma_flush_area(virt, PAGE_SIZE);
}

static void *__iommu_alloc_attrs(struct device *dev, size_t size,
				 dma_addr_t *handle, gfp_t gfp,
				 unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	size_t iosize = size;
	void *addr;

	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
		return NULL;

	size = PAGE_ALIGN(size);

	/*
	 * Some drivers rely on this, and we probably don't want the
	 * possibility of stale kernel data being read by devices anyway.
	 */
	gfp |= __GFP_ZERO;

	if (!gfpflags_allow_blocking(gfp)) {
		struct page *page;
		/*
		 * In atomic context we can't remap anything, so we'll only
		 * get the virtually contiguous buffer we need by way of a
		 * physically contiguous allocation.
		 */
		if (coherent) {
			page = alloc_pages(gfp, get_order(size));
			addr = page ? page_address(page) : NULL;
		} else {
			addr = __alloc_from_pool(size, &page, gfp);
		}
		if (!addr)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
			if (coherent)
				__free_pages(page, get_order(size));
			else
				__free_from_pool(addr, size);
			addr = NULL;
		}
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
					get_order(size), gfp & __GFP_NOWARN);
		if (!page)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
			dma_release_from_contiguous(dev, page,
						    size >> PAGE_SHIFT);
			return NULL;
		}
		addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   prot,
						   __builtin_return_address(0));
		if (addr) {
			if (!coherent)
				__dma_flush_area(page_to_virt(page), iosize);
			memset(addr, 0, size);
		} else {
			iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
			dma_release_from_contiguous(dev, page,
						    size >> PAGE_SHIFT);
		}
	} else {
		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
		struct page **pages;

		pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
					handle, flush_page);
		if (!pages)
			return NULL;

		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
					      __builtin_return_address(0));
		if (!addr)
			iommu_dma_free(dev, pages, iosize, handle);
	}
	return addr;
}

static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			       dma_addr_t handle, unsigned long attrs)
{
	size_t iosize = size;

	size = PAGE_ALIGN(size);
	/*
	 * @cpu_addr will be one of 4 things depending on how it was allocated:
	 * - A remapped array of pages for contiguous allocations.
	 * - A remapped array of pages from iommu_dma_alloc(), for all
	 *   non-atomic allocations.
	 * - A non-cacheable alias from the atomic pool, for atomic
	 *   allocations by non-coherent devices.
	 * - A normal lowmem address, for atomic allocations by
	 *   coherent devices.
	 * Hence how dodgy the below logic looks...
	 */
	if (__in_atomic_pool(cpu_addr, size)) {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_from_pool(cpu_addr, size);
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		struct page *page = vmalloc_to_page(cpu_addr);

		iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
		dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else if (is_vmalloc_addr(cpu_addr)) {
		struct vm_struct *area = find_vm_area(cpu_addr);

		if (WARN_ON(!area || !area->pages))
			return;
		iommu_dma_free(dev, area->pages, iosize, &handle);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_pages(virt_to_page(cpu_addr), get_order(size));
	}
}

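/*
 * Example (illustrative sketch): the four cases above mirror how the buffer
 * was produced by __iommu_alloc_attrs(). A caller that needs the backing
 * pages to be physically contiguous as well as IOVA-contiguous would take
 * the DMA_ATTR_FORCE_CONTIGUOUS branch of both paths:
 *
 *	dma_addr_t iova;
 *	void *cpu = dma_alloc_attrs(dev, size, &iova, GFP_KERNEL,
 *				    DMA_ATTR_FORCE_CONTIGUOUS);
 *
 *	dma_free_attrs(dev, size, cpu, iova, DMA_ATTR_FORCE_CONTIGUOUS);
 */
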
static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
			      unsigned long attrs)
{
	struct vm_struct *area;
	int ret;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (!is_vmalloc_addr(cpu_addr)) {
		unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
		return __swiotlb_mmap_pfn(vma, pfn, size);
	}

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		unsigned long pfn = vmalloc_to_pfn(cpu_addr);
		return __swiotlb_mmap_pfn(vma, pfn, size);
	}

	area = find_vm_area(cpu_addr);
	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return iommu_dma_mmap(area->pages, size, vma);
}

static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
			       void *cpu_addr, dma_addr_t dma_addr,
			       size_t size, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!is_vmalloc_addr(cpu_addr)) {
		struct page *page = virt_to_page(cpu_addr);
		return __swiotlb_get_sgtable_page(sgt, page, size);
	}

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		struct page *page = vmalloc_to_page(cpu_addr);
		return __swiotlb_get_sgtable_page(sgt, page, size);
	}

	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
					 GFP_KERNEL);
}

static void __iommu_sync_single_for_cpu(struct device *dev,
					dma_addr_t dev_addr, size_t size,
					enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_unmap_area(phys_to_virt(phys), size, dir);
}

static void __iommu_sync_single_for_device(struct device *dev,
					   dma_addr_t dev_addr, size_t size,
					   enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_map_area(phys_to_virt(phys), size, dir);
}

static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

	if (!iommu_dma_mapping_error(dev, dev_addr) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_device(dev, dev_addr, size, dir);

	return dev_addr;
}

static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

	iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
}

static void __iommu_sync_sg_for_cpu(struct device *dev,
				    struct scatterlist *sgl, int nelems,
				    enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		__dma_unmap_area(sg_virt(sg), sg->length, dir);
}

static void __iommu_sync_sg_for_device(struct device *dev,
				       struct scatterlist *sgl, int nelems,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		__dma_map_area(sg_virt(sg), sg->length, dir);
}

static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				int nelems, enum dma_data_direction dir,
				unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_device(dev, sgl, nelems, dir);

	return iommu_dma_map_sg(dev, sgl, nelems,
				dma_info_to_prot(dir, coherent, attrs));
}

static void __iommu_unmap_sg_attrs(struct device *dev,
				   struct scatterlist *sgl, int nelems,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);

	iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}

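/*
 * Example (illustrative sketch; queue_segment() is a hypothetical helper):
 * a scatterlist mapped through the IOMMU may be merged into fewer DMA
 * segments than it has entries, so drivers must iterate over the count
 * returned by dma_map_sg(), not the original nents:
 *
 *	struct scatterlist *sg;
 *	int i, count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 *	for_each_sg(sgl, sg, count, i)
 *		queue_segment(sg_dma_address(sg), sg_dma_len(sg));
 *
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */
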
static const struct dma_map_ops iommu_dma_ops = {
	.alloc = __iommu_alloc_attrs,
	.free = __iommu_free_attrs,
	.mmap = __iommu_mmap_attrs,
	.get_sgtable = __iommu_get_sgtable,
	.map_page = __iommu_map_page,
	.unmap_page = __iommu_unmap_page,
	.map_sg = __iommu_map_sg_attrs,
	.unmap_sg = __iommu_unmap_sg_attrs,
	.sync_single_for_cpu = __iommu_sync_single_for_cpu,
	.sync_single_for_device = __iommu_sync_single_for_device,
	.sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
	.sync_sg_for_device = __iommu_sync_sg_for_device,
	.map_resource = iommu_dma_map_resource,
	.unmap_resource = iommu_dma_unmap_resource,
	.mapping_error = iommu_dma_mapping_error,
};

static int __init __iommu_dma_init(void)
{
	return iommu_dma_init();
}
arch_initcall(__iommu_dma_init);

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *ops)
{
	struct iommu_domain *domain;

	if (!ops)
		return;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;

		dev->dma_ops = &iommu_dma_ops;
	}

	return;

out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}

void arch_teardown_dma_ops(struct device *dev)
{
	dev->dma_ops = NULL;
}

#else

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *iommu)
{ }

#endif	/* CONFIG_IOMMU_DMA */

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	if (!dev->dma_ops)
		dev->dma_ops = &arm64_swiotlb_dma_ops;

	dev->archdata.dma_coherent = coherent;
	__iommu_setup_dma_ops(dev, dma_base, size, iommu);

#ifdef CONFIG_XEN
	if (xen_initial_domain()) {
		dev->archdata.dev_dma_ops = dev->dma_ops;
		dev->dma_ops = xen_dma_ops;
	}
#endif
}