// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-map-ops.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;
static int s390_iommu_strict;
static u64 s390_iommu_aperture;
static u32 s390_iommu_aperture_factor = 1;

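/*
 * Flush the device's I/O TLB (RPCIT) for the entire DMA aperture, i.e.
 * from start_dma to the end of the iommu bitmap. Used before DMA
 * addresses from lazily invalidated mappings are handed out again.
 */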
static int zpci_refresh_global(struct zpci_dev *zdev)
{
	return zpci_refresh_trans((u64) zdev->fh << 32, zdev->start_dma,
				  zdev->iommu_pages * PAGE_SIZE);
}

unsigned long *dma_alloc_cpu_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
		*entry = ZPCI_TABLE_INVALID;
	return table;
}

static void dma_free_cpu_table(void *table)
{
	kmem_cache_free(dma_region_table_cache, table);
}

static unsigned long *dma_alloc_page_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
		*entry = ZPCI_PTE_INVALID;
	return table;
}

static void dma_free_page_table(void *table)
{
	kmem_cache_free(dma_page_table_cache, table);
}

static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
{
	unsigned long *sto;

	if (reg_entry_isvalid(*entry))
		sto = get_rt_sto(*entry);
	else {
		sto = dma_alloc_cpu_table();
		if (!sto)
			return NULL;

		set_rt_sto(entry, virt_to_phys(sto));
		validate_rt_entry(entry);
		entry_clr_protected(entry);
	}
	return sto;
}

static unsigned long *dma_get_page_table_origin(unsigned long *entry)
{
	unsigned long *pto;

	if (reg_entry_isvalid(*entry))
		pto = get_st_pto(*entry);
	else {
		pto = dma_alloc_page_table();
		if (!pto)
			return NULL;
		set_st_pto(entry, virt_to_phys(pto));
		validate_st_entry(entry);
		entry_clr_protected(entry);
	}
	return pto;
}

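/*
 * Walk the region table rto down to the page-table entry for dma_addr,
 * allocating missing segment and page tables on the way. Returns a
 * pointer to the page-table entry, or NULL on allocation failure.
 */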
unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
	unsigned long *sto, *pto;
	unsigned int rtx, sx, px;

	rtx = calc_rtx(dma_addr);
	sto = dma_get_seg_table_origin(&rto[rtx]);
	if (!sto)
		return NULL;

	sx = calc_sx(dma_addr);
	pto = dma_get_page_table_origin(&sto[sx]);
	if (!pto)
		return NULL;

	px = calc_px(dma_addr);
	return &pto[px];
}

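/*
 * Set or invalidate a single page-table entry and apply the protection
 * bit, as requested by flags.
 */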
void dma_update_cpu_trans(unsigned long *entry, phys_addr_t page_addr, int flags)
{
	if (flags & ZPCI_PTE_INVALID) {
		invalidate_pt_entry(entry);
	} else {
		set_pt_pfaa(entry, page_addr);
		validate_pt_entry(entry);
	}

	if (flags & ZPCI_TABLE_PROTECTED)
		entry_set_protected(entry);
	else
		entry_clr_protected(entry);
}

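/*
 * Update the translation-table entries for [dma_addr, dma_addr + size)
 * under dma_table_lock. If the walk fails partway through a validation,
 * the entries that were already validated are rolled back to invalid.
 */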
static int __dma_update_trans(struct zpci_dev *zdev, phys_addr_t pa,
			      dma_addr_t dma_addr, size_t size, int flags)
{
	unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	phys_addr_t page_addr = (pa & PAGE_MASK);
	unsigned long irq_flags;
	unsigned long *entry;
	int i, rc = 0;

	if (!nr_pages)
		return -EINVAL;

	spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
	if (!zdev->dma_table) {
		rc = -EINVAL;
		goto out_unlock;
	}

	for (i = 0; i < nr_pages; i++) {
		entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
		if (!entry) {
			rc = -ENOMEM;
			goto undo_cpu_trans;
		}
		dma_update_cpu_trans(entry, page_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

undo_cpu_trans:
	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
		flags = ZPCI_PTE_INVALID;
		while (i-- > 0) {
			page_addr -= PAGE_SIZE;
			dma_addr -= PAGE_SIZE;
			entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
			if (!entry)
				break;
			dma_update_cpu_trans(entry, page_addr, flags);
		}
	}
out_unlock:
	spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
	return rc;
}

static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
			   size_t size, int flags)
{
	unsigned long irqflags;
	int ret;

	/*
	 * With zdev->tlb_refresh == 0, rpcit is not required to establish new
	 * translations when previously invalid translation-table entries are
	 * validated. With lazy unmap, rpcit is skipped for previously valid
	 * entries, but a global rpcit is then required before any address can
	 * be re-used, i.e. after each iommu bitmap wrap-around.
	 */
	if ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID) {
		if (!zdev->tlb_refresh)
			return 0;
	} else {
		if (!s390_iommu_strict)
			return 0;
	}

	ret = zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
				 PAGE_ALIGN(size));
	if (ret == -ENOMEM && !s390_iommu_strict) {
		/* enable the hypervisor to free some resources */
		if (zpci_refresh_global(zdev))
			return ret;

		spin_lock_irqsave(&zdev->iommu_bitmap_lock, irqflags);
		bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
			      zdev->lazy_bitmap, zdev->iommu_pages);
		bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
		spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, irqflags);
		ret = 0;
	}

	return ret;
}

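/*
 * Combined translation-table update and TLB flush: if the flush for
 * newly validated entries fails, the table update is undone again.
 */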
static int dma_update_trans(struct zpci_dev *zdev, phys_addr_t pa,
			    dma_addr_t dma_addr, size_t size, int flags)
{
	int rc;

	rc = __dma_update_trans(zdev, pa, dma_addr, size, flags);
	if (rc)
		return rc;

	rc = __dma_purge_tlb(zdev, dma_addr, size, flags);
	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
		__dma_update_trans(zdev, pa, dma_addr, size, ZPCI_PTE_INVALID);

	return rc;
}

void dma_free_seg_table(unsigned long entry)
{
	unsigned long *sto = get_rt_sto(entry);
	int sx;

	for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
		if (reg_entry_isvalid(sto[sx]))
			dma_free_page_table(get_st_pto(sto[sx]));

	dma_free_cpu_table(sto);
}

void dma_cleanup_tables(unsigned long *table)
{
	int rtx;

	if (!table)
		return;

	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
		if (reg_entry_isvalid(table[rtx]))
			dma_free_seg_table(table[rtx]);

	dma_free_cpu_table(table);
}

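/*
 * DMA addresses are handed out from a bitmap with one bit per page of
 * the aperture. The search starts at next_bit; callers must hold
 * iommu_bitmap_lock.
 */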
static unsigned long __dma_alloc_iommu(struct device *dev,
				       unsigned long start, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));

	return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
				start, size, zdev->start_dma >> PAGE_SHIFT,
				dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT),
				0);
}

static dma_addr_t dma_alloc_address(struct device *dev, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long offset, flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
	if (offset == -1) {
		if (!s390_iommu_strict) {
			/* global flush before DMA addresses are reused */
			if (zpci_refresh_global(zdev))
				goto out_error;

			bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
				      zdev->lazy_bitmap, zdev->iommu_pages);
			bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
		}
		/* wrap-around */
		offset = __dma_alloc_iommu(dev, 0, size);
		if (offset == -1)
			goto out_error;
	}
	zdev->next_bit = offset + size;
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);

	return zdev->start_dma + offset * PAGE_SIZE;

out_error:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
	return DMA_MAPPING_ERROR;
}

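/*
 * Return an address range to the allocator. In strict mode the bits are
 * cleared right away; in lazy mode they are only marked in lazy_bitmap
 * and cleared after the next global TLB flush.
 */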
static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long flags, offset;

	offset = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	if (!zdev->iommu_bitmap)
		goto out;

	if (s390_iommu_strict)
		bitmap_clear(zdev->iommu_bitmap, offset, size);
	else
		bitmap_set(zdev->lazy_bitmap, offset, size);

out:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}

static inline void zpci_err_dma(unsigned long rc, unsigned long addr)
{
	struct {
		unsigned long rc;
		unsigned long addr;
	} __packed data = {rc, addr};

	zpci_err_hex(&data, sizeof(data));
}

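/*
 * map_page callback: allocate a DMA address range covering the buffer,
 * create the translations and flush the TLB. DMA_NONE and DMA_TO_DEVICE
 * mappings are created protected, i.e. read-only for the device.
 */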
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long pa = page_to_phys(page) + offset;
	int flags = ZPCI_PTE_VALID;
	unsigned long nr_pages;
	dma_addr_t dma_addr;
	int ret;

	/* This rounds up number of pages based on size and offset */
	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
	dma_addr = dma_alloc_address(dev, nr_pages);
	if (dma_addr == DMA_MAPPING_ERROR) {
		ret = -ENOSPC;
		goto out_err;
	}

	/* Use rounded up size */
	size = nr_pages * PAGE_SIZE;

	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
	if (ret)
		goto out_free;

	atomic64_add(nr_pages, &zdev->mapped_pages);
	return dma_addr + (offset & ~PAGE_MASK);

out_free:
	dma_free_address(dev, dma_addr, nr_pages);
out_err:
	zpci_err("map error:\n");
	zpci_err_dma(ret, pa);
	return DMA_MAPPING_ERROR;
}

static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	int npages, ret;

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr = dma_addr & PAGE_MASK;
	ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
			       ZPCI_PTE_INVALID);
	if (ret) {
		zpci_err("unmap error:\n");
		zpci_err_dma(ret, dma_addr);
		return;
	}

	atomic64_add(npages, &zdev->unmapped_pages);
	dma_free_address(dev, dma_addr, npages);
}

static void *s390_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flag,
			    unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	struct page *page;
	phys_addr_t pa;
	dma_addr_t map;

	size = PAGE_ALIGN(size);
	page = alloc_pages(flag | __GFP_ZERO, get_order(size));
	if (!page)
		return NULL;

	pa = page_to_phys(page);
	map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, map)) {
		__free_pages(page, get_order(size));
		return NULL;
	}

	atomic64_add(size / PAGE_SIZE, &zdev->allocated_pages);
	if (dma_handle)
		*dma_handle = map;
	return phys_to_virt(pa);
}

static void s390_dma_free(struct device *dev, size_t size,
			  void *vaddr, dma_addr_t dma_handle,
			  unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));

	size = PAGE_ALIGN(size);
	atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
	s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, 0);
	free_pages((unsigned long)vaddr, get_order(size));
}

/* Map a segment into a contiguous dma address area */
static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			     size_t size, dma_addr_t *handle,
			     enum dma_data_direction dir)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	dma_addr_t dma_addr_base, dma_addr;
	int flags = ZPCI_PTE_VALID;
	struct scatterlist *s;
	phys_addr_t pa = 0;
	int ret;

	dma_addr_base = dma_alloc_address(dev, nr_pages);
	if (dma_addr_base == DMA_MAPPING_ERROR)
		return -ENOMEM;

	dma_addr = dma_addr_base;
	if (dir == DMA_NONE || dir == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
		pa = page_to_phys(sg_page(s));
		ret = __dma_update_trans(zdev, pa, dma_addr,
					 s->offset + s->length, flags);
		if (ret)
			goto unmap;

		dma_addr += s->offset + s->length;
	}
	ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags);
	if (ret)
		goto unmap;

	*handle = dma_addr_base;
	atomic64_add(nr_pages, &zdev->mapped_pages);

	return ret;

unmap:
	dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
			 ZPCI_PTE_INVALID);
	dma_free_address(dev, dma_addr_base, nr_pages);
	zpci_err("map error:\n");
	zpci_err_dma(ret, pa);
	return ret;
}

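/*
 * map_sg callback: coalesce the scatterlist into as few contiguous DMA
 * address ranges as possible. A new range is started whenever an element
 * does not continue page-aligned from its predecessor or the maximum
 * segment size would be exceeded.
 */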
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			   int nr_elements, enum dma_data_direction dir,
			   unsigned long attrs)
{
	struct scatterlist *s = sg, *start = sg, *dma = sg;
	unsigned int max = dma_get_max_seg_size(dev);
	unsigned int size = s->offset + s->length;
	unsigned int offset = s->offset;
	int count = 0, i, ret;

	for (i = 1; i < nr_elements; i++) {
		s = sg_next(s);

		s->dma_length = 0;

		if (s->offset || (size & ~PAGE_MASK) ||
		    size + s->length > max) {
			ret = __s390_dma_map_sg(dev, start, size,
						&dma->dma_address, dir);
			if (ret)
				goto unmap;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count++;
		}
		size += s->length;
	}
	ret = __s390_dma_map_sg(dev, start, size, &dma->dma_address, dir);
	if (ret)
		goto unmap;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count + 1;
unmap:
	for_each_sg(sg, s, count, i)
		s390_dma_unmap_pages(dev, sg_dma_address(s), sg_dma_len(s),
				     dir, attrs);

	return ret;
}

static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
			      int nr_elements, enum dma_data_direction dir,
			      unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		if (s->dma_length)
			s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
					     dir, attrs);
		s->dma_address = 0;
		s->dma_length = 0;
	}
}

static unsigned long *bitmap_vzalloc(size_t bits, gfp_t flags)
{
	size_t n = BITS_TO_LONGS(bits);
	size_t bytes;

	if (unlikely(check_mul_overflow(n, sizeof(unsigned long), &bytes)))
		return NULL;

	return vzalloc(bytes);
}

int zpci_dma_init_device(struct zpci_dev *zdev)
{
	int rc;

	/*
	 * At this point, if the device is part of an IOMMU domain, this would
	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
	 * simultaneous access via IOMMU and DMA API. So let's issue a warning.
	 */
	WARN_ON(zdev->s390_domain);

	spin_lock_init(&zdev->iommu_bitmap_lock);
	spin_lock_init(&zdev->dma_table_lock);

	zdev->dma_table = dma_alloc_cpu_table();
	if (!zdev->dma_table) {
		rc = -ENOMEM;
		goto out;
	}
	/*
	 * Restrict the iommu bitmap size to the minimum of the following:
	 * - s390_iommu_aperture which defaults to high_memory
	 * - 3-level pagetable address limit minus start_dma offset
	 * - DMA address range allowed by the hardware (clp query pci fn)
	 *
	 * Also set zdev->end_dma to the actual end address of the usable
	 * range, instead of the theoretical maximum as reported by hardware.
	 *
	 * This limits the number of concurrently usable DMA mappings since
	 * for each DMA mapped memory address we need a DMA address including
	 * extra DMA addresses for multiple mappings of the same memory address.
	 */
	zdev->start_dma = PAGE_ALIGN(zdev->start_dma);
	zdev->iommu_size = min3(s390_iommu_aperture,
				ZPCI_TABLE_SIZE_RT - zdev->start_dma,
				zdev->end_dma - zdev->start_dma + 1);
	zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
	zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
	zdev->iommu_bitmap = bitmap_vzalloc(zdev->iommu_pages, GFP_KERNEL);
	if (!zdev->iommu_bitmap) {
		rc = -ENOMEM;
		goto free_dma_table;
	}
	if (!s390_iommu_strict) {
		zdev->lazy_bitmap = bitmap_vzalloc(zdev->iommu_pages, GFP_KERNEL);
		if (!zdev->lazy_bitmap) {
			rc = -ENOMEM;
			goto free_bitmap;
		}
	}
	if (zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
			       virt_to_phys(zdev->dma_table))) {
		rc = -EIO;
		goto free_bitmap;
	}

	return 0;
free_bitmap:
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
	vfree(zdev->lazy_bitmap);
	zdev->lazy_bitmap = NULL;
free_dma_table:
	dma_free_cpu_table(zdev->dma_table);
	zdev->dma_table = NULL;
out:
	return rc;
}

int zpci_dma_exit_device(struct zpci_dev *zdev)
{
	int cc = 0;

	/*
	 * At this point, if the device is part of an IOMMU domain, this would
	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
	 * simultaneous access via IOMMU and DMA API. So let's issue a warning.
	 */
	WARN_ON(zdev->s390_domain);
	if (zdev_enabled(zdev))
		cc = zpci_unregister_ioat(zdev, 0);
	/*
	 * cc == 3 indicates the function is gone already. This can happen
	 * if the function was deconfigured/disabled suddenly and we have not
	 * received a new handle yet.
	 */
	if (cc && cc != 3)
		return -EIO;

	dma_cleanup_tables(zdev->dma_table);
	zdev->dma_table = NULL;
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
	vfree(zdev->lazy_bitmap);
	zdev->lazy_bitmap = NULL;
	zdev->next_bit = 0;
	return 0;
}

static int __init dma_alloc_cpu_table_caches(void)
{
	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
					ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
					0, NULL);
	if (!dma_region_table_cache)
		return -ENOMEM;

	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
					ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
					0, NULL);
	if (!dma_page_table_cache) {
		kmem_cache_destroy(dma_region_table_cache);
		return -ENOMEM;
	}
	return 0;
}

int __init zpci_dma_init(void)
{
	s390_iommu_aperture = (u64)virt_to_phys(high_memory);
	if (!s390_iommu_aperture_factor)
		s390_iommu_aperture = ULONG_MAX;
	else
		s390_iommu_aperture *= s390_iommu_aperture_factor;

	return dma_alloc_cpu_table_caches();
}

void zpci_dma_exit(void)
{
	kmem_cache_destroy(dma_page_table_cache);
	kmem_cache_destroy(dma_region_table_cache);
}

const struct dma_map_ops s390_pci_dma_ops = {
	.alloc		= s390_dma_alloc,
	.free		= s390_dma_free,
	.map_sg		= s390_dma_map_sg,
	.unmap_sg	= s390_dma_unmap_sg,
	.map_page	= s390_dma_map_pages,
	.unmap_page	= s390_dma_unmap_pages,
	.mmap		= dma_common_mmap,
	.get_sgtable	= dma_common_get_sgtable,
	.alloc_pages	= dma_common_alloc_pages,
	.free_pages	= dma_common_free_pages,
	/* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_pci_dma_ops);

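/*
 * "s390_iommu=strict" on the kernel command line switches from the
 * default lazy unmap (flush the I/O TLB only when DMA addresses wrap
 * around) to flushing on every unmap.
 */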
static int __init s390_iommu_setup(char *str)
{
	if (!strcmp(str, "strict"))
		s390_iommu_strict = 1;
	return 1;
}

__setup("s390_iommu=", s390_iommu_setup);

static int __init s390_iommu_aperture_setup(char *str)
{
	if (kstrtou32(str, 10, &s390_iommu_aperture_factor))
		s390_iommu_aperture_factor = 1;
	return 1;
}

__setup("s390_iommu_aperture=", s390_iommu_aperture_setup);