/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/overflow.h>
#include <asm/pci_dma.h>
#define S390_MAPPING_ERROR	(~(dma_addr_t) 0x0)
static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;
static int s390_iommu_strict;
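/*
 * dma_region_table_cache backs the region and segment tables of the
 * 3-level I/O translation table, dma_page_table_cache the page tables.
 * s390_iommu_strict selects strict TLB invalidation on every unmap instead
 * of the default lazy scheme (see the s390_iommu= parameter below).
 */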
static int zpci_refresh_global(struct zpci_dev *zdev)
{
	return zpci_refresh_trans((u64) zdev->fh << 32, zdev->start_dma,
				  zdev->iommu_pages * PAGE_SIZE);
}
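/*
 * Allocate a region/segment table with all entries marked invalid.
 * GFP_ATOMIC because this can be called with zdev->dma_table_lock held
 * from the mapping paths.
 */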
unsigned long *dma_alloc_cpu_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
		*entry = ZPCI_TABLE_INVALID;
	return table;
}
static void dma_free_cpu_table(void *table)
{
	kmem_cache_free(dma_region_table_cache, table);
}
static unsigned long *dma_alloc_page_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
		*entry = ZPCI_PTE_INVALID;
	return table;
}
static void dma_free_page_table(void *table)
{
	kmem_cache_free(dma_page_table_cache, table);
}
static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
{
	unsigned long *sto;

	if (reg_entry_isvalid(*entry))
		sto = get_rt_sto(*entry);
	else {
		sto = dma_alloc_cpu_table();
		if (!sto)
			return NULL;

		set_rt_sto(entry, sto);
		validate_rt_entry(entry);
		entry_clr_protected(entry);
	}
	return sto;
}
static unsigned long *dma_get_page_table_origin(unsigned long *entry)
{
	unsigned long *pto;

	if (reg_entry_isvalid(*entry))
		pto = get_st_pto(*entry);
	else {
		pto = dma_alloc_page_table();
		if (!pto)
			return NULL;
		set_st_pto(entry, pto);
		validate_st_entry(entry);
		entry_clr_protected(entry);
	}
	return pto;
}
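/*
 * Walk (and build as needed) the 3-level translation table for dma_addr:
 * region table entry -> segment table -> page table. Returns a pointer to
 * the page-table entry for dma_addr, or NULL if a table allocation failed.
 */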
unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
	unsigned long *sto, *pto;
	unsigned int rtx, sx, px;

	rtx = calc_rtx(dma_addr);
	sto = dma_get_seg_table_origin(&rto[rtx]);
	if (!sto)
		return NULL;

	sx = calc_sx(dma_addr);
	pto = dma_get_page_table_origin(&sto[sx]);
	if (!pto)
		return NULL;

	px = calc_px(dma_addr);
	return &pto[px];
}
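/*
 * Update a single page-table entry: either invalidate it, or point it at
 * page_addr and validate it. Callers pass ZPCI_TABLE_PROTECTED for
 * DMA_TO_DEVICE/DMA_NONE mappings so the device cannot write through the
 * entry.
 */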
void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags)
{
	if (flags & ZPCI_PTE_INVALID) {
		invalidate_pt_entry(entry);
	} else {
		set_pt_pfaa(entry, page_addr);
		validate_pt_entry(entry);
	}

	if (flags & ZPCI_TABLE_PROTECTED)
		entry_set_protected(entry);
	else
		entry_clr_protected(entry);
}
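/*
 * Update the translation-table entries for a mapping, page by page, under
 * zdev->dma_table_lock. On failure all entries validated by this call are
 * rolled back to invalid. The device TLB is not touched here; that is
 * __dma_purge_tlb()'s job.
 */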
static int __dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
			      dma_addr_t dma_addr, size_t size, int flags)
{
	unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
	unsigned long irq_flags;
	unsigned long *entry;
	int i, rc = 0;

	if (!nr_pages)
		return -EINVAL;

	spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
	if (!zdev->dma_table) {
		rc = -EINVAL;
		goto out_unlock;
	}

	for (i = 0; i < nr_pages; i++) {
		entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
		if (!entry) {
			rc = -ENOMEM;
			goto undo_cpu_trans;
		}
		dma_update_cpu_trans(entry, page_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

undo_cpu_trans:
	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
		flags = ZPCI_PTE_INVALID;
		while (i-- > 0) {
			page_addr -= PAGE_SIZE;
			dma_addr -= PAGE_SIZE;
			entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
			if (!entry)
				break;
			dma_update_cpu_trans(entry, page_addr, flags);
		}
	}
out_unlock:
	spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
	return rc;
}
static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
			   size_t size, int flags)
{
	/*
	 * With zdev->tlb_refresh == 0, rpcit is not required to establish new
	 * translations when previously invalid translation-table entries are
	 * validated. With lazy unmap, rpcit is skipped for previously valid
	 * entries, but a global rpcit is then required before any address can
	 * be re-used, i.e. after each iommu bitmap wrap-around.
	 */
	if ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID) {
		if (!zdev->tlb_refresh)
			return 0;
	} else {
		if (!s390_iommu_strict)
			return 0;
	}

	return zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
				  PAGE_ALIGN(size));
}
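/*
 * Combined CPU-side table update plus TLB purge. If the purge fails after
 * validating new entries, those entries are invalidated again so the
 * tables and the device TLB stay consistent.
 */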
static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
			    dma_addr_t dma_addr, size_t size, int flags)
{
	int rc;

	rc = __dma_update_trans(zdev, pa, dma_addr, size, flags);
	if (rc)
		return rc;

	rc = __dma_purge_tlb(zdev, dma_addr, size, flags);
	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
		__dma_update_trans(zdev, pa, dma_addr, size, ZPCI_PTE_INVALID);

	return rc;
}
void dma_free_seg_table(unsigned long entry)
{
	unsigned long *sto = get_rt_sto(entry);
	int sx;

	for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
		if (reg_entry_isvalid(sto[sx]))
			dma_free_page_table(get_st_pto(sto[sx]));

	dma_free_cpu_table(sto);
}
void dma_cleanup_tables(unsigned long *table)
{
	int rtx;

	if (!table)
		return;

	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
		if (reg_entry_isvalid(table[rtx]))
			dma_free_seg_table(table[rtx]);

	dma_free_cpu_table(table);
}
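/*
 * Find a free range of @size pages in the iommu bitmap, starting the
 * search at @start and honoring the device's segment-boundary mask.
 * Returns the page offset into the bitmap, or -1 if no range was found.
 */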
static unsigned long __dma_alloc_iommu(struct device *dev,
				       unsigned long start, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long boundary_size;

	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;
	return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
				start, size, zdev->start_dma >> PAGE_SHIFT,
				boundary_size, 0);
}
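/*
 * Allocate a DMA address range of @size pages, next-fit from
 * zdev->next_bit. In lazy mode a wrap-around first flushes the device TLB
 * globally and returns all lazily freed ranges to the allocator before
 * retrying from the start of the bitmap.
 */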
static dma_addr_t dma_alloc_address(struct device *dev, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long offset, flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
	if (offset == -1) {
		if (!s390_iommu_strict) {
			/* global flush before DMA addresses are reused */
			if (zpci_refresh_global(zdev))
				goto out_error;

			bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
				      zdev->lazy_bitmap, zdev->iommu_pages);
			bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
		}
		/* wrap-around */
		offset = __dma_alloc_iommu(dev, 0, size);
		if (offset == -1)
			goto out_error;
	}
	zdev->next_bit = offset + size;
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);

	return zdev->start_dma + offset * PAGE_SIZE;

out_error:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
	return S390_MAPPING_ERROR;
}
static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long flags, offset;

	offset = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	if (!zdev->iommu_bitmap)
		goto out;

	if (s390_iommu_strict)
		bitmap_clear(zdev->iommu_bitmap, offset, size);
	else
		bitmap_set(zdev->lazy_bitmap, offset, size);

out:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}
static inline void zpci_err_dma(unsigned long rc, unsigned long addr)
{
	struct {
		unsigned long rc;
		unsigned long addr;
	} __packed data = {rc, addr};

	zpci_err_hex(&data, sizeof(data));
}
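/*
 * dma_map_ops.map_page: map @size bytes starting at @offset into @page.
 * The returned bus address preserves the sub-page offset; on failure
 * S390_MAPPING_ERROR is returned and the error is logged via
 * zpci_err_dma().
 */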
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long pa = page_to_phys(page) + offset;
	int flags = ZPCI_PTE_VALID;
	unsigned long nr_pages;
	dma_addr_t dma_addr;
	int ret;

	/* This rounds up number of pages based on size and offset */
	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
	dma_addr = dma_alloc_address(dev, nr_pages);
	if (dma_addr == S390_MAPPING_ERROR) {
		ret = -ENOSPC;
		goto out_err;
	}

	/* Use rounded up size */
	size = nr_pages * PAGE_SIZE;

	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
	if (ret)
		goto out_free;

	atomic64_add(nr_pages, &zdev->mapped_pages);
	return dma_addr + (offset & ~PAGE_MASK);

out_free:
	dma_free_address(dev, dma_addr, nr_pages);
out_err:
	zpci_err("map error:\n");
	zpci_err_dma(ret, pa);
	return S390_MAPPING_ERROR;
}
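/*
 * dma_map_ops.unmap_page: invalidate the translation entries for the
 * range and give the DMA addresses back to the allocator (immediately in
 * strict mode, via the lazy bitmap otherwise).
 */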
static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	int npages, ret;

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr = dma_addr & PAGE_MASK;
	ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
			       ZPCI_PTE_INVALID);
	if (ret) {
		zpci_err("unmap error:\n");
		zpci_err_dma(ret, dma_addr);
		return;
	}

	atomic64_add(npages, &zdev->unmapped_pages);
	dma_free_address(dev, dma_addr, npages);
}
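/*
 * dma_map_ops.alloc: a coherent allocation is just a page allocation plus
 * a bidirectional mapping; the physical address of the pages, which on
 * s390 doubles as their kernel virtual address, is returned directly.
 */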
static void *s390_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flag,
			    unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	struct page *page;
	unsigned long pa;
	dma_addr_t map;

	size = PAGE_ALIGN(size);
	page = alloc_pages(flag, get_order(size));
	if (!page)
		return NULL;

	pa = page_to_phys(page);
	map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, map)) {
		free_pages(pa, get_order(size));
		return NULL;
	}

	atomic64_add(size / PAGE_SIZE, &zdev->allocated_pages);
	if (dma_handle)
		*dma_handle = map;
	return (void *) pa;
}
static void s390_dma_free(struct device *dev, size_t size,
			  void *pa, dma_addr_t dma_handle,
			  unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));

	size = PAGE_ALIGN(size);
	atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
	s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, 0);
	free_pages((unsigned long) pa, get_order(size));
}
/* Map a segment into a contiguous dma address area */
static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			     size_t size, dma_addr_t *handle,
			     enum dma_data_direction dir)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	dma_addr_t dma_addr_base, dma_addr;
	int flags = ZPCI_PTE_VALID;
	struct scatterlist *s;
	unsigned long pa = 0;
	int ret;

	dma_addr_base = dma_alloc_address(dev, nr_pages);
	if (dma_addr_base == S390_MAPPING_ERROR)
		return -ENOMEM;

	dma_addr = dma_addr_base;
	if (dir == DMA_NONE || dir == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
		pa = page_to_phys(sg_page(s));
		ret = __dma_update_trans(zdev, pa, dma_addr,
					 s->offset + s->length, flags);
		if (ret)
			goto unmap;

		dma_addr += s->offset + s->length;
	}
	ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags);
	if (ret)
		goto unmap;

	*handle = dma_addr_base;
	atomic64_add(nr_pages, &zdev->mapped_pages);

	return ret;

unmap:
	dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
			 ZPCI_PTE_INVALID);
	dma_free_address(dev, dma_addr_base, nr_pages);
	zpci_err("map error:\n");
	zpci_err_dma(ret, pa);
	return ret;
}
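/*
 * dma_map_ops.map_sg: coalesce the scatterlist into as few contiguous DMA
 * ranges as possible. A new range is started whenever an element does not
 * continue page-aligned from the current one or the range would exceed
 * the device's maximum segment size. Returns the number of mapped DMA
 * segments, or 0 on failure after unmapping everything mapped so far.
 */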
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			   int nr_elements, enum dma_data_direction dir,
			   unsigned long attrs)
{
	struct scatterlist *s = sg, *start = sg, *dma = sg;
	unsigned int max = dma_get_max_seg_size(dev);
	unsigned int size = s->offset + s->length;
	unsigned int offset = s->offset;
	int count = 0, i;

	for (i = 1; i < nr_elements; i++) {
		s = sg_next(s);

		s->dma_address = S390_MAPPING_ERROR;
		s->dma_length = 0;

		if (s->offset || (size & ~PAGE_MASK) ||
		    size + s->length > max) {
			if (__s390_dma_map_sg(dev, start, size,
					      &dma->dma_address, dir))
				goto unmap;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count++;
		}
		size += s->length;
	}
	if (__s390_dma_map_sg(dev, start, size, &dma->dma_address, dir))
		goto unmap;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count + 1;
unmap:
	for_each_sg(sg, s, count, i)
		s390_dma_unmap_pages(dev, sg_dma_address(s), sg_dma_len(s),
				     dir, attrs);

	return 0;
}
static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
			      int nr_elements, enum dma_data_direction dir,
			      unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		if (s->dma_length)
			s390_dma_unmap_pages(dev, s->dma_address,
					     s->dma_length, dir, attrs);
		s->dma_address = 0;
		s->dma_length = 0;
	}
}
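/*
 * vzalloc() a bitmap of @bits bits, guarding the byte-size computation
 * against multiplication overflow.
 */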
static unsigned long *bitmap_vzalloc(size_t bits, gfp_t flags)
{
	size_t n = BITS_TO_LONGS(bits);
	size_t bytes;

	if (unlikely(check_mul_overflow(n, sizeof(unsigned long), &bytes)))
		return NULL;

	return vzalloc(bytes);
}
static int s390_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == S390_MAPPING_ERROR;
}
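/*
 * Per-device DMA setup: allocate the root translation table and the iommu
 * (and, in lazy mode, lazy) bitmaps, clamp the usable DMA range, and
 * register the translation table with the hardware.
 */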
int zpci_dma_init_device(struct zpci_dev *zdev)
{
	int rc;

	/*
	 * At this point, if the device is part of an IOMMU domain, this would
	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
	 * simultaneous access via IOMMU and DMA API. So let's issue a warning.
	 */
	WARN_ON(zdev->s390_domain);

	spin_lock_init(&zdev->iommu_bitmap_lock);
	spin_lock_init(&zdev->dma_table_lock);

	zdev->dma_table = dma_alloc_cpu_table();
	if (!zdev->dma_table) {
		rc = -ENOMEM;
		goto out;
	}

	/*
	 * Restrict the iommu bitmap size to the minimum of the following:
	 * - main memory size
	 * - 3-level pagetable address limit minus start_dma offset
	 * - DMA address range allowed by the hardware (clp query pci fn)
	 *
	 * Also set zdev->end_dma to the actual end address of the usable
	 * range, instead of the theoretical maximum as reported by hardware.
	 */
	zdev->start_dma = PAGE_ALIGN(zdev->start_dma);
	zdev->iommu_size = min3((u64) high_memory,
				ZPCI_TABLE_SIZE_RT - zdev->start_dma,
				zdev->end_dma - zdev->start_dma + 1);
	zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
	zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
	zdev->iommu_bitmap = bitmap_vzalloc(zdev->iommu_pages, GFP_KERNEL);
	if (!zdev->iommu_bitmap) {
		rc = -ENOMEM;
		goto free_dma_table;
	}
	if (!s390_iommu_strict) {
		zdev->lazy_bitmap = bitmap_vzalloc(zdev->iommu_pages, GFP_KERNEL);
		if (!zdev->lazy_bitmap) {
			rc = -ENOMEM;
			goto free_bitmap;
		}
	}
	rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
				(u64) zdev->dma_table);
	if (rc)
		goto free_bitmap;

	return 0;
free_bitmap:
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
	vfree(zdev->lazy_bitmap);
	zdev->lazy_bitmap = NULL;
free_dma_table:
	dma_free_cpu_table(zdev->dma_table);
	zdev->dma_table = NULL;
out:
	return rc;
}
void zpci_dma_exit_device(struct zpci_dev *zdev)
{
	/*
	 * At this point, if the device is part of an IOMMU domain, this would
	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
	 * simultaneous access via IOMMU and DMA API. So let's issue a warning.
	 */
	WARN_ON(zdev->s390_domain);

	if (zpci_unregister_ioat(zdev, 0))
		return;

	dma_cleanup_tables(zdev->dma_table);
	zdev->dma_table = NULL;
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
	vfree(zdev->lazy_bitmap);
	zdev->lazy_bitmap = NULL;
	zdev->next_bit = 0;
}
static int __init dma_alloc_cpu_table_caches(void)
{
	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
					ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
					0, NULL);
	if (!dma_region_table_cache)
		return -ENOMEM;

	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
					ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
					0, NULL);
	if (!dma_page_table_cache) {
		kmem_cache_destroy(dma_region_table_cache);
		return -ENOMEM;
	}
	return 0;
}
int __init zpci_dma_init(void)
{
	return dma_alloc_cpu_table_caches();
}
void zpci_dma_exit(void)
{
	kmem_cache_destroy(dma_page_table_cache);
	kmem_cache_destroy(dma_region_table_cache);
}
#define PREALLOC_DMA_DEBUG_ENTRIES	(1 << 16)

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);
const struct dma_map_ops s390_pci_dma_ops = {
	.alloc		= s390_dma_alloc,
	.free		= s390_dma_free,
	.map_sg		= s390_dma_map_sg,
	.unmap_sg	= s390_dma_unmap_sg,
	.map_page	= s390_dma_map_pages,
	.unmap_page	= s390_dma_unmap_pages,
	.mapping_error	= s390_mapping_error,
	/* if we support direct DMA this must be conditional */
	.is_phys	= 0,
	/* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_pci_dma_ops);
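/* "s390_iommu=strict" switches from lazy to strict TLB flushing. */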
static int __init s390_iommu_setup(char *str)
{
	if (!strncmp(str, "strict", 6))
		s390_iommu_strict = 1;
	return 1;	/* handled; non-zero keeps the option out of init's argv */
}

__setup("s390_iommu=", s390_iommu_setup);