// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <linux/hash.h>
#include <linux/fault-inject.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>
#include <asm/vio.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>
#include <asm/ppc-pci.h>

#define DBG(...)
#ifdef CONFIG_IOMMU_DEBUGFS
static int iommu_debugfs_weight_get(void *data, u64 *val)
{
	struct iommu_table *tbl = data;

	*val = bitmap_weight(tbl->it_map, tbl->it_size);
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(iommu_debugfs_fops_weight, iommu_debugfs_weight_get, NULL, "%llu\n");

static void iommu_debugfs_add(struct iommu_table *tbl)
{
	char name[10];
	struct dentry *liobn_entry;

	sprintf(name, "%08lx", tbl->it_index);
	liobn_entry = debugfs_create_dir(name, iommu_debugfs_dir);

	debugfs_create_file_unsafe("weight", 0400, liobn_entry, tbl, &iommu_debugfs_fops_weight);
	debugfs_create_ulong("it_size", 0400, liobn_entry, &tbl->it_size);
	debugfs_create_ulong("it_page_shift", 0400, liobn_entry, &tbl->it_page_shift);
	debugfs_create_ulong("it_reserved_start", 0400, liobn_entry, &tbl->it_reserved_start);
	debugfs_create_ulong("it_reserved_end", 0400, liobn_entry, &tbl->it_reserved_end);
	debugfs_create_ulong("it_indirect_levels", 0400, liobn_entry, &tbl->it_indirect_levels);
	debugfs_create_ulong("it_level_size", 0400, liobn_entry, &tbl->it_level_size);
}

static void iommu_debugfs_del(struct iommu_table *tbl)
{
	char name[10];

	sprintf(name, "%08lx", tbl->it_index);
	debugfs_lookup_and_remove(name, iommu_debugfs_dir);
}
#else
static void iommu_debugfs_add(struct iommu_table *tbl){}
static void iommu_debugfs_del(struct iommu_table *tbl){}
#endif

static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("iommu=", setup_iommu);
static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);

/*
 * We precalculate the hash to avoid doing it on every allocation.
 *
 * The hash is important to spread CPUs across all the pools. For example,
 * on a POWER7 with 4-way SMT we want interrupts on the primary threads,
 * and with 4 pools a simple CPU-number hash would map all primary
 * threads to the same pool.
 */
static int __init setup_iommu_pool_hash(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);

	return 0;
}
subsys_initcall(setup_iommu_pool_hash);
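/*
 * Illustrative note (not from the original source): hash_32() multiplies
 * the CPU number by the 32-bit golden ratio constant and keeps the top
 * IOMMU_POOL_HASHBITS bits, so consecutive and same-stride CPU ids
 * scatter pseudo-randomly across the pools instead of collapsing onto
 * one pool the way a plain "cpu % nr_pools" would for sibling threads.
 */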
#ifdef CONFIG_FAIL_IOMMU

static DECLARE_FAULT_ATTR(fail_iommu);

static int __init setup_fail_iommu(char *str)
{
	return setup_fault_attr(&fail_iommu, str);
}
__setup("fail_iommu=", setup_fail_iommu);

static bool should_fail_iommu(struct device *dev)
{
	return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
}

static int __init fail_iommu_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
						       NULL, &fail_iommu);

	return PTR_ERR_OR_ZERO(dir);
}
late_initcall(fail_iommu_debugfs);

static ssize_t fail_iommu_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
}

static ssize_t fail_iommu_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	int i;

	if (count > 0 && sscanf(buf, "%d", &i) > 0)
		dev->archdata.fail_iommu = (i == 0) ? 0 : 1;

	return count;
}

static DEVICE_ATTR_RW(fail_iommu);

static int fail_iommu_bus_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (device_create_file(dev, &dev_attr_fail_iommu))
			pr_warn("Unable to create IOMMU fault injection sysfs "
				"entries\n");
	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
		device_remove_file(dev, &dev_attr_fail_iommu);
	}

	return 0;
}

/*
 * PCI and VIO buses need separate notifier_block structs, since they're linked
 * list nodes. Sharing a notifier_block would mean that any notifiers later
 * registered for PCI buses would also get called by VIO buses and vice versa.
 */
static struct notifier_block fail_iommu_pci_bus_notifier = {
	.notifier_call = fail_iommu_bus_notify
};

#ifdef CONFIG_IBMVIO
static struct notifier_block fail_iommu_vio_bus_notifier = {
	.notifier_call = fail_iommu_bus_notify
};
#endif

static int __init fail_iommu_setup(void)
{
#ifdef CONFIG_PCI
	bus_register_notifier(&pci_bus_type, &fail_iommu_pci_bus_notifier);
#endif
#ifdef CONFIG_IBMVIO
	bus_register_notifier(&vio_bus_type, &fail_iommu_vio_bus_notifier);
#endif

	return 0;
}
/*
 * Must execute after PCI and VIO subsystems have initialised but before
 * devices are probed.
 */
arch_initcall(fail_iommu_setup);
#else
static inline bool should_fail_iommu(struct device *dev)
{
	return false;
}
#endif
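/*
 * Usage sketch (illustrative; the PCI address below is hypothetical and
 * the generic knobs come from the fault-injection framework in
 * lib/fault-inject.c, assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	# opt one device in to IOMMU fault injection
 *	echo 1 > /sys/bus/pci/devices/0000:00:01.0/fail_iommu
 *	# then make roughly half of the eligible allocations fail
 *	echo 50 > /sys/kernel/debug/fail_iommu/probability
 *	echo -1 > /sys/kernel/debug/fail_iommu/times
 */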
static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long flags;
	unsigned int pool_nr;
	struct iommu_pool *pool;

	align_mask = (1ull << align_order) - 1;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_MAPPING_ERROR;
	}

	if (should_fail_iommu(dev))
		return DMA_MAPPING_ERROR;

	/*
	 * We don't need to disable preemption here because any CPU can
	 * safely use any IOMMU pool.
	 */
	pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);

	if (largealloc)
		pool = &(tbl->large_pool);
	else
		pool = &(tbl->pools[pool_nr]);

	spin_lock_irqsave(&(pool->lock), flags);

again:
	if ((pass == 0) && handle && *handle &&
	    (*handle >= pool->start) && (*handle < pool->end))
		start = *handle;
	else
		start = pool->hint;

	limit = pool->end;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = pool->start;

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0 in pool 0.
		 */
		if ((start & mask) >= limit || pass > 0) {
			spin_unlock(&(pool->lock));
			pool = &(tbl->pools[0]);
			spin_lock(&(pool->lock));
			start = pool->start;
		} else {
			start &= mask;
		}
	}

	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
			dma_get_seg_boundary_nr_pages(dev, tbl->it_page_shift),
			align_mask);
	if (n == -1) {
		if (likely(pass == 0)) {
			/* First try the pool from the start */
			pool->hint = pool->start;
			pass++;
			goto again;

		} else if (pass <= tbl->nr_pools) {
			/* Now try scanning all the other pools */
			spin_unlock(&(pool->lock));
			pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
			pool = &tbl->pools[pool_nr];
			spin_lock(&(pool->lock));
			pool->hint = pool->start;
			pass++;
			goto again;

		} else if (pass == tbl->nr_pools + 1) {
			/* Last resort: try largepool */
			spin_unlock(&pool->lock);
			pool = &tbl->large_pool;
			spin_lock(&pool->lock);
			pool->hint = pool->start;
			pass++;
			goto again;

		} else {
			/* Give up */
			spin_unlock_irqrestore(&(pool->lock), flags);
			return DMA_MAPPING_ERROR;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		pool->hint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		pool->hint = (end + tbl->it_blocksize - 1) &
			~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	spin_unlock_irqrestore(&(pool->lock), flags);

	return n;
}
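/*
 * Search-order sketch for the allocator above (illustrative): pass 0
 * retries the starting pool from its start, passes 1..nr_pools walk the
 * remaining small pools one by one, pass nr_pools + 1 falls back to the
 * large pool, and only after all of those fail is DMA_MAPPING_ERROR
 * returned to the caller.
 */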
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      unsigned long attrs)
{
	unsigned long entry;
	dma_addr_t ret = DMA_MAPPING_ERROR;
	int build_fail;

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_MAPPING_ERROR))
		return DMA_MAPPING_ERROR;

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << tbl->it_page_shift;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = tbl->it_ops->set(tbl, entry, npages,
				      (unsigned long)page &
				      IOMMU_PAGE_MASK(tbl), direction, attrs);

	/* tbl->it_ops->set() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * DMA_MAPPING_ERROR. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);
		return DMA_MAPPING_ERROR;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}
static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
			     unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> tbl->it_page_shift;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry    = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr = 0x%llx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable    = 0x%llx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#     = 0x%llx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize     = 0x%llx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex    = 0x%llx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}

		return false;
	}

	return true;
}
static struct iommu_pool *get_pool(struct iommu_table *tbl,
				   unsigned long entry)
{
	struct iommu_pool *p;
	unsigned long largepool_start = tbl->large_pool.start;

	/* The large pool is the last pool at the top of the table */
	if (entry >= largepool_start) {
		p = &tbl->large_pool;
	} else {
		unsigned int pool_nr = entry / tbl->poolsize;

		BUG_ON(pool_nr > tbl->nr_pools);
		p = &tbl->pools[pool_nr];
	}

	return p;
}
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long flags;
	struct iommu_pool *pool;

	entry = dma_addr >> tbl->it_page_shift;
	free_entry = entry - tbl->it_offset;

	pool = get_pool(tbl, free_entry);

	if (!iommu_free_check(tbl, dma_addr, npages))
		return;

	tbl->it_ops->clear(tbl, entry, npages);

	spin_lock_irqsave(&(pool->lock), flags);
	bitmap_clear(tbl->it_map, free_entry, npages);
	spin_unlock_irqrestore(&(pool->lock), flags);
}
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		       unsigned int npages)
{
	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);
}
int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		     struct scatterlist *sglist, int nelems,
		     unsigned long mask, enum dma_data_direction direction,
		     unsigned long attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return -EINVAL;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> tbl->it_page_shift, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_MAPPING_ERROR)) {
			if (!(attrs & DMA_ATTR_NO_WARN) &&
			    printk_ratelimit())
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %lx npages %lu\n", tbl, vaddr,
					 npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << tbl->it_page_shift;
		dma_addr |= (vaddr & ~IOMMU_PAGE_MASK(tbl));

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = tbl->it_ops->set(tbl, entry, npages,
					      vaddr & IOMMU_PAGE_MASK(tbl),
					      direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 * - the merged segment would exceed the device's max segment size
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %ux\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE(tbl));
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_MAPPING_ERROR;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	return -EIO;
}
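/*
 * Merging sketch (illustrative): with virtual merging enabled (the
 * default; boot with "iommu=novmerge" to disable), two 4K scatterlist
 * entries whose TCE allocations happen to be contiguous in DMA space
 * come back as a single 8K dma segment, provided the combined length
 * stays within dma_get_max_seg_size(dev); otherwise a new output
 * segment is started.
 */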
void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
			int nelems, enum dma_data_direction direction,
			unsigned long attrs)
{
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE(tbl));
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);
}
static void iommu_table_clear(struct iommu_table *tbl)
{
	/*
	 * In case of firmware assisted dump, the system goes through a clean
	 * reboot process at the time of system crash. Hence it's safe to
	 * clear the TCE entries if firmware assisted dump is active.
	 */
	if (!is_kdump_kernel() || is_fadump_active()) {
		/* Clear the table in case firmware left allocations in it */
		tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
		return;
	}

#ifdef CONFIG_CRASH_DUMP
	if (tbl->it_ops->get) {
		unsigned long index, tceval, tcecount = 0;

		/* Reserve the existing mappings left by the first kernel. */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}

		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; freeing ");
			printk(KERN_WARNING "%d entries for the kdump boot\n",
				KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
				index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#endif
}
static void iommu_table_reserve_pages(struct iommu_table *tbl,
		unsigned long res_start, unsigned long res_end)
{
	int i;

	WARN_ON_ONCE(res_end < res_start);
	/*
	 * Reserve page 0 so it will not be used for any mappings.
	 * This avoids buggy drivers that consider page 0 to be invalid
	 * crashing the machine or even losing data.
	 */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	if (res_start < tbl->it_offset)
		res_start = tbl->it_offset;

	if (res_end > (tbl->it_offset + tbl->it_size))
		res_end = tbl->it_offset + tbl->it_size;

	/* Check if res_start..res_end is a valid range in the table */
	if (res_start >= res_end) {
		tbl->it_reserved_start = tbl->it_offset;
		tbl->it_reserved_end = tbl->it_offset;
		return;
	}

	tbl->it_reserved_start = res_start;
	tbl->it_reserved_end = res_end;

	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
		set_bit(i - tbl->it_offset, tbl->it_map);
}
/*
 * Build an iommu_table structure. This contains a bit map which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
		unsigned long res_start, unsigned long res_end)
{
	unsigned long sz;
	static int welcomed = 0;
	unsigned int i;
	struct iommu_pool *p;

	BUG_ON(!tbl->it_ops);

	/* number of bytes needed for the bitmap */
	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

	tbl->it_map = vzalloc_node(sz, nid);
	if (!tbl->it_map) {
		pr_err("%s: Can't allocate %ld bytes\n", __func__, sz);
		return NULL;
	}

	iommu_table_reserve_pages(tbl, res_start, res_end);

	/* We only split the IOMMU table if we have 1GB or more of space */
	if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
		tbl->nr_pools = IOMMU_NR_POOLS;
	else
		tbl->nr_pools = 1;

	/* We reserve the top 1/4 of the table for large allocations */
	tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;

	for (i = 0; i < tbl->nr_pools; i++) {
		p = &tbl->pools[i];
		spin_lock_init(&(p->lock));
		p->start = tbl->poolsize * i;
		p->hint = p->start;
		p->end = p->start + tbl->poolsize;
	}

	p = &tbl->large_pool;
	spin_lock_init(&(p->lock));
	p->start = tbl->poolsize * i;
	p->hint = p->start;
	p->end = tbl->it_size;

	iommu_table_clear(tbl);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	iommu_debugfs_add(tbl);

	return tbl;
}
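/*
 * Worked layout example (illustrative numbers): a 1GB window with 4K
 * IOMMU pages gives it_size = 262144 entries. With IOMMU_NR_POOLS == 4
 * that makes poolsize = 49152, so pools 0-3 cover entries 0-196607 and
 * the large pool (used for allocations of more than 15 pages) covers
 * the top quarter, entries 196608-262143.
 */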
bool iommu_table_in_use(struct iommu_table *tbl)
{
	unsigned long start = 0, end;

	/* ignore reserved bit0 */
	if (tbl->it_offset == 0)
		start = 1;

	/* Simple case with no reserved MMIO32 region */
	if (!tbl->it_reserved_start && !tbl->it_reserved_end)
		return find_next_bit(tbl->it_map, tbl->it_size, start) != tbl->it_size;

	end = tbl->it_reserved_start - tbl->it_offset;
	if (find_next_bit(tbl->it_map, end, start) != end)
		return true;

	start = tbl->it_reserved_end - tbl->it_offset;
	end = tbl->it_size;
	return find_next_bit(tbl->it_map, end, start) != end;
}
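/*
 * Scan sketch: with it_offset == 0 and a reserved MMIO32 hole at
 * entries [res_start, res_end), the bitmap is checked as two ranges,
 * [1, res_start) and [res_end, it_size), so neither bit 0 nor the
 * reserved hole ever makes the table look "in use".
 */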
static void iommu_table_free(struct kref *kref)
{
	struct iommu_table *tbl;

	tbl = container_of(kref, struct iommu_table, it_kref);

	if (tbl->it_ops->free)
		tbl->it_ops->free(tbl);

	if (!tbl->it_map) {
		kfree(tbl);
		return;
	}

	iommu_debugfs_del(tbl);

	/* verify that table contains no entries */
	if (iommu_table_in_use(tbl))
		pr_warn("%s: Unexpected TCEs\n", __func__);

	/* free bitmap */
	vfree(tbl->it_map);

	/* free table */
	kfree(tbl);
}

struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
{
	if (kref_get_unless_zero(&tbl->it_kref))
		return tbl;

	return NULL;
}
EXPORT_SYMBOL_GPL(iommu_tce_table_get);

int iommu_tce_table_put(struct iommu_table *tbl)
{
	if (WARN_ON(!tbl))
		return 0;

	return kref_put(&tbl->it_kref, iommu_table_free);
}
EXPORT_SYMBOL_GPL(iommu_tce_table_put);
/* Creates TCEs for a user provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset, size_t size,
			  unsigned long mask, enum dma_data_direction direction,
			  unsigned long attrs)
{
	dma_addr_t dma_handle = DMA_MAPPING_ERROR;
	void *vaddr;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	vaddr = page_address(page) + offset;
	uaddr = (unsigned long)vaddr;

	if (tbl) {
		npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> tbl->it_page_shift, align,
					 attrs);
		if (dma_handle == DMA_MAPPING_ERROR) {
			if (!(attrs & DMA_ATTR_NO_WARN) &&
			    printk_ratelimit()) {
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %p npages %d\n", tbl, vaddr,
					 npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
	}

	return dma_handle;
}

void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
		      size_t size, enum dma_data_direction direction,
		      unsigned long attrs)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size,
					 IOMMU_PAGE_SIZE(tbl));
		iommu_free(tbl, dma_handle, npages);
	}
}
/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
			   size_t size, dma_addr_t *dma_handle,
			   unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;
	int tcesize = (1 << tbl->it_page_shift);

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space. This is checked later
	 * anyway. It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
			 size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift;

	io_order = get_iommu_order(size, tbl);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> tbl->it_page_shift, io_order, 0);
	if (mapping == DMA_MAPPING_ERROR) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}

	*dma_handle = mapping | ((u64)ret & (tcesize - 1));
	return ret;
}

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift;
		iommu_free(tbl, dma_handle, nio_pages);
		/* size is already page aligned from above */
		free_pages((unsigned long)vaddr, get_order(size));
	}
}
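/*
 * Pairing sketch (illustrative, not a real in-tree caller):
 *
 *	vaddr = iommu_alloc_coherent(dev, tbl, 8192, &bus_addr,
 *				     dev->coherent_dma_mask, GFP_KERNEL,
 *				     dev_to_node(dev));
 *	...
 *	iommu_free_coherent(tbl, 8192, vaddr, bus_addr);
 *
 * Both sides round the size up to PAGE_SIZE, so alloc and free must be
 * called with the same size for the page accounting to match.
 */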
unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return TCE_PCI_READ | TCE_PCI_WRITE;
	case DMA_FROM_DEVICE:
		return TCE_PCI_WRITE;
	case DMA_TO_DEVICE:
		return TCE_PCI_READ;
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);
#ifdef CONFIG_IOMMU_API
/*
 * SPAPR TCE API
 */
static void group_release(void *iommu_data)
{
	struct iommu_table_group *table_group = iommu_data;

	table_group->group = NULL;
}

void iommu_register_group(struct iommu_table_group *table_group,
		int pci_domain_number, unsigned long pe_num)
{
	struct iommu_group *grp;
	char *name;

	grp = iommu_group_alloc();
	if (IS_ERR(grp)) {
		pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
				PTR_ERR(grp));
		return;
	}
	table_group->group = grp;
	iommu_group_set_iommudata(grp, table_group, group_release);
	name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
			pci_domain_number, pe_num);
	if (!name)
		return;
	iommu_group_set_name(grp, name);
	kfree(name);
}
enum dma_data_direction iommu_tce_direction(unsigned long tce)
{
	if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
		return DMA_BIDIRECTIONAL;
	else if (tce & TCE_PCI_READ)
		return DMA_TO_DEVICE;
	else if (tce & TCE_PCI_WRITE)
		return DMA_FROM_DEVICE;
	else
		return DMA_NONE;
}
EXPORT_SYMBOL_GPL(iommu_tce_direction);
void iommu_flush_tce(struct iommu_table *tbl)
{
	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();
}
EXPORT_SYMBOL_GPL(iommu_flush_tce);
int iommu_tce_check_ioba(unsigned long page_shift,
		unsigned long offset, unsigned long size,
		unsigned long ioba, unsigned long npages)
{
	unsigned long mask = (1UL << page_shift) - 1;

	if (ioba & mask)
		return -EINVAL;

	ioba >>= page_shift;
	if (ioba < offset)
		return -EINVAL;

	if ((ioba + 1) > (offset + size))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);
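/*
 * Worked example (illustrative): for page_shift = 12, offset = 0 and
 * size = 4096 entries, ioba = 0x2000 passes (entry 2 of the window),
 * ioba = 0x2001 fails the alignment check, and ioba = 0x1000000
 * (entry 4096) fails the upper-bound check.
 */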
int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
{
	unsigned long mask = (1UL << page_shift) - 1;

	if (gpa & mask)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);
long iommu_tce_xchg_no_kill(struct mm_struct *mm,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction)
{
	long ret;
	unsigned long size = 0;

	ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction);
	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
			(*direction == DMA_BIDIRECTIONAL)) &&
			!mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
					&size))
		SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_tce_xchg_no_kill);

void iommu_tce_kill(struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	if (tbl->it_ops->tce_kill)
		tbl->it_ops->tce_kill(tbl, entry, pages);
}
EXPORT_SYMBOL_GPL(iommu_tce_kill);
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
static int iommu_take_ownership(struct iommu_table *tbl)
{
	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
	int ret = 0;

	/*
	 * VFIO does not control TCE entries allocation and the guest
	 * can write new TCEs on top of existing ones so iommu_tce_build()
	 * must be able to release old pages. This functionality
	 * requires exchange() callback defined so if it is not
	 * implemented, we disallow taking ownership over the table.
	 */
	if (!tbl->it_ops->xchg_no_kill)
		return -EINVAL;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);
	for (i = 0; i < tbl->nr_pools; i++)
		spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);

	if (iommu_table_in_use(tbl)) {
		pr_err("iommu_tce: it_map is not empty");
		ret = -EBUSY;
	} else {
		memset(tbl->it_map, 0xff, sz);
	}

	for (i = 0; i < tbl->nr_pools; i++)
		spin_unlock(&tbl->pools[i].lock);
	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);

	return ret;
}

static void iommu_release_ownership(struct iommu_table *tbl)
{
	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);
	for (i = 0; i < tbl->nr_pools; i++)
		spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);

	memset(tbl->it_map, 0, sz);

	iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
			tbl->it_reserved_end);

	for (i = 0; i < tbl->nr_pools; i++)
		spin_unlock(&tbl->pools[i].lock);
	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
}
#endif
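/*
 * Ownership sketch: while e.g. VFIO owns the table, it_map is filled
 * with 0xff so the kernel DMA API cannot hand out any entries;
 * releasing ownership zeroes the map again and re-applies the page-0
 * and MMIO32 reservations via iommu_table_reserve_pages().
 */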
int iommu_add_device(struct iommu_table_group *table_group, struct device *dev)
{
	/*
	 * The sysfs entries should be populated before
	 * binding the IOMMU group. If the sysfs entries aren't
	 * ready, we simply bail.
	 */
	if (!device_is_registered(dev))
		return -ENOENT;

	if (device_iommu_mapped(dev)) {
		pr_debug("%s: Skipping device %s with iommu group %d\n",
			 __func__, dev_name(dev),
			 iommu_group_id(dev->iommu_group));
		return -EBUSY;
	}

	pr_debug("%s: Adding %s to iommu group %d\n",
		 __func__, dev_name(dev), iommu_group_id(table_group->group));
	/*
	 * This is still not adding devices via the IOMMU bus notifier because
	 * of pcibios_init() from arch/powerpc/kernel/pci_64.c which calls
	 * pcibios_scan_phb() first (and this adds devices and triggers the
	 * notifier) and only then calls pci_bus_add_devices() which
	 * configures DMA for buses which also creates PEs and IOMMU groups.
	 */
	return iommu_probe_device(dev);
}
EXPORT_SYMBOL_GPL(iommu_add_device);
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * A simple iommu_table_group_ops which only allows reusing the existing
 * iommu_table. This handles VFIO for POWER7 or the nested KVM.
 * The ops do not allow creating windows and only allow reusing the existing
 * one if it matches table_group->tce32_start/tce32_size/page_shift.
 */
static unsigned long spapr_tce_get_table_size(__u32 page_shift,
					      __u64 window_size, __u32 levels)
{
	unsigned long size;

	if (levels > 1)
		return ~0U;
	size = window_size >> (page_shift - 3);
	return size;
}

static long spapr_tce_create_table(struct iommu_table_group *table_group, int num,
				   __u32 page_shift, __u64 window_size, __u32 levels,
				   struct iommu_table **ptbl)
{
	struct iommu_table *tbl = table_group->tables[0];

	if (num > 0)
		return -EPERM;

	if (tbl->it_page_shift != page_shift ||
	    tbl->it_size != (window_size >> page_shift) ||
	    tbl->it_indirect_levels != levels - 1)
		return -EINVAL;

	*ptbl = iommu_tce_table_get(tbl);
	return 0;
}

static long spapr_tce_set_window(struct iommu_table_group *table_group,
				 int num, struct iommu_table *tbl)
{
	return tbl == table_group->tables[num] ? 0 : -EPERM;
}

static long spapr_tce_unset_window(struct iommu_table_group *table_group, int num)
{
	return 0;
}

static long spapr_tce_take_ownership(struct iommu_table_group *table_group)
{
	int i, j, rc = 0;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = table_group->tables[i];

		if (!tbl || !tbl->it_map)
			continue;

		rc = iommu_take_ownership(tbl);
		if (!rc)
			continue;

		for (j = 0; j < i; ++j)
			iommu_release_ownership(table_group->tables[j]);
		return rc;
	}
	return 0;
}

static void spapr_tce_release_ownership(struct iommu_table_group *table_group)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = table_group->tables[i];

		if (!tbl)
			continue;

		iommu_table_clear(tbl);

		if (tbl->it_map)
			iommu_release_ownership(tbl);
	}
}

struct iommu_table_group_ops spapr_tce_table_group_ops = {
	.get_table_size = spapr_tce_get_table_size,
	.create_table = spapr_tce_create_table,
	.set_window = spapr_tce_set_window,
	.unset_window = spapr_tce_unset_window,
	.take_ownership = spapr_tce_take_ownership,
	.release_ownership = spapr_tce_release_ownership,
};
/*
 * A simple iommu_ops to allow less cruft in generic VFIO code.
 */
static int
spapr_tce_platform_iommu_attach_dev(struct iommu_domain *platform_domain,
				    struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_group *grp = iommu_group_get(dev);
	struct iommu_table_group *table_group;
	int ret = -EINVAL;

	/* At first attach the ownership is already set */
	if (!domain) {
		iommu_group_put(grp);
		return 0;
	}

	if (!grp)
		return -ENODEV;

	table_group = iommu_group_get_iommudata(grp);
	ret = table_group->ops->take_ownership(table_group);
	iommu_group_put(grp);

	return ret;
}

static const struct iommu_domain_ops spapr_tce_platform_domain_ops = {
	.attach_dev = spapr_tce_platform_iommu_attach_dev,
};

static struct iommu_domain spapr_tce_platform_domain = {
	.type = IOMMU_DOMAIN_PLATFORM,
	.ops = &spapr_tce_platform_domain_ops,
};

static struct iommu_domain spapr_tce_blocked_domain = {
	.type = IOMMU_DOMAIN_BLOCKED,
	/*
	 * FIXME: SPAPR mixes blocked and platform behaviors, the blocked domain
	 * also sets the dma_api ops
	 */
	.ops = &spapr_tce_platform_domain_ops,
};

static bool spapr_tce_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	default:
		break;
	}

	return false;
}
static struct iommu_device *spapr_tce_iommu_probe_device(struct device *dev)
{
	struct pci_dev *pdev;
	struct pci_controller *hose;

	if (!dev_is_pci(dev))
		return ERR_PTR(-ENODEV);

	pdev = to_pci_dev(dev);
	hose = pdev->bus->sysdata;

	return &hose->iommu;
}

static void spapr_tce_iommu_release_device(struct device *dev)
{
}

static struct iommu_group *spapr_tce_iommu_device_group(struct device *dev)
{
	struct pci_controller *hose;
	struct pci_dev *pdev;

	pdev = to_pci_dev(dev);
	hose = pdev->bus->sysdata;

	if (!hose->controller_ops.device_group)
		return ERR_PTR(-ENOENT);

	return hose->controller_ops.device_group(hose, pdev);
}

static const struct iommu_ops spapr_tce_iommu_ops = {
	.default_domain = &spapr_tce_platform_domain,
	.blocked_domain = &spapr_tce_blocked_domain,
	.capable = spapr_tce_iommu_capable,
	.probe_device = spapr_tce_iommu_probe_device,
	.release_device = spapr_tce_iommu_release_device,
	.device_group = spapr_tce_iommu_device_group,
};

static struct attribute *spapr_tce_iommu_attrs[] = {
	NULL,
};

static struct attribute_group spapr_tce_iommu_group = {
	.name = "spapr-tce-iommu",
	.attrs = spapr_tce_iommu_attrs,
};

static const struct attribute_group *spapr_tce_iommu_groups[] = {
	&spapr_tce_iommu_group,
	NULL,
};
void ppc_iommu_register_device(struct pci_controller *phb)
{
	iommu_device_sysfs_add(&phb->iommu, phb->parent,
			       spapr_tce_iommu_groups, "iommu-phb%04x",
			       phb->global_number);
	iommu_device_register(&phb->iommu, &spapr_tce_iommu_ops,
			      phb->parent);
}

void ppc_iommu_unregister_device(struct pci_controller *phb)
{
	iommu_device_unregister(&phb->iommu);
	iommu_device_sysfs_remove(&phb->iommu);
}

/*
 * This registers IOMMU devices of PHBs. This needs to happen
 * after core_initcall(iommu_init) + postcore_initcall(pci_driver_init) and
 * before subsys_initcall(iommu_subsys_init).
 */
static int __init spapr_tce_setup_phb_iommus_initcall(void)
{
	struct pci_controller *hose;

	list_for_each_entry(hose, &hose_list, list_node) {
		ppc_iommu_register_device(hose);
	}
	return 0;
}
postcore_initcall_sync(spapr_tce_setup_phb_iommus_initcall);
#endif

#endif /* CONFIG_IOMMU_API */