1 // SPDX-License-Identifier: GPL-2.0-only
3 * VFIO: IOMMU DMA mapping support for TCE on POWER
5 * Copyright (C) 2013 IBM Corp. All rights reserved.
6 * Author: Alexey Kardashevskiy <aik@ozlabs.ru>
7 * Copyright Gavin Shan, IBM Corporation 2014.
9 * Derived from original vfio_iommu_type1.c:
10 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
11 * Author: Alex Williamson <alex.williamson@redhat.com>
14 #include <linux/module.h>
15 #include <linux/pci.h>
16 #include <linux/slab.h>
17 #include <linux/uaccess.h>
18 #include <linux/err.h>
19 #include <linux/vfio.h>
20 #include <linux/vmalloc.h>
21 #include <linux/sched/mm.h>
22 #include <linux/sched/signal.h>
26 #include <asm/iommu.h>
28 #include <asm/mmu_context.h>
30 #define DRIVER_VERSION "0.1"
31 #define DRIVER_AUTHOR "aik@ozlabs.ru"
32 #define DRIVER_DESC "VFIO IOMMU SPAPR TCE"
34 static void tce_iommu_detach_group(void *iommu_data,
35 struct iommu_group *iommu_group);
38 * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
40 * This code handles mapping and unmapping of user data buffers
41 * into DMA'ble space using the IOMMU
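 *
 * A minimal userspace sketch of the v1 flow, for illustration only (it is
 * not part of this file; "cfd", "buf" and "sz" are placeholders, and the
 * container fd is assumed to already have a group attached via
 * VFIO_GROUP_SET_CONTAINER):
 *
 *	ioctl(cfd, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
 *	ioctl(cfd, VFIO_IOMMU_ENABLE);
 *
 *	struct vfio_iommu_type1_dma_map map = {
 *		.argsz = sizeof(map),
 *		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *		.vaddr = (__u64)(unsigned long)buf,
 *		.iova = 0,
 *		.size = sz,
 *	};
 *	ioctl(cfd, VFIO_IOMMU_MAP_DMA, &map);
 *
 * Both vaddr and size must be multiples of the IOMMU page size, and the
 * iova must fall inside a window reported by VFIO_IOMMU_SPAPR_TCE_GET_INFO.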
44 struct tce_iommu_group {
45 struct list_head next;
46 struct iommu_group *grp;
50 * A container needs to remember which preregistered region it has
51 * referenced so it can do proper cleanup when the userspace process exits.
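 *
 * For illustration only (not from the original source; "cfd", "ram" and
 * "ram_size" are placeholders), a v2 user typically preregisters the
 * backing memory up front:
 *
 *	struct vfio_iommu_spapr_register_memory reg = {
 *		.argsz = sizeof(reg),
 *		.flags = 0,
 *		.vaddr = (__u64)(unsigned long)ram,
 *		.size = ram_size,
 *	};
 *	ioctl(cfd, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
 *
 * Both values must be system-page aligned, as tce_iommu_register_pages()
 * below checks.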
53 struct tce_iommu_prereg {
54 struct list_head next;
55 struct mm_iommu_table_group_mem_t *mem;
59 * The container descriptor supports only a single group per container.
60 * Required by the API as the container is not supplied with the IOMMU group
61 * at the moment of initialization.
63 struct tce_container {
67 bool def_window_pending;
68 unsigned long locked_pages;
70 struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
71 struct list_head group_list;
72 struct list_head prereg_list;
75 static long tce_iommu_mm_set(struct tce_container *container)
78 if (container->mm == current->mm)
83 container->mm = current->mm;
84 mmgrab(container->mm);
89 static long tce_iommu_prereg_free(struct tce_container *container,
90 struct tce_iommu_prereg *tcemem)
94 ret = mm_iommu_put(container->mm, tcemem->mem);
98 list_del(&tcemem->next);
104 static long tce_iommu_unregister_pages(struct tce_container *container,
105 __u64 vaddr, __u64 size)
107 struct mm_iommu_table_group_mem_t *mem;
108 struct tce_iommu_prereg *tcemem;
112 if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
115 mem = mm_iommu_get(container->mm, vaddr, size >> PAGE_SHIFT);
119 list_for_each_entry(tcemem, &container->prereg_list, next) {
120 if (tcemem->mem == mem) {
129 ret = tce_iommu_prereg_free(container, tcemem);
131 mm_iommu_put(container->mm, mem);
136 static long tce_iommu_register_pages(struct tce_container *container,
137 __u64 vaddr, __u64 size)
140 struct mm_iommu_table_group_mem_t *mem = NULL;
141 struct tce_iommu_prereg *tcemem;
142 unsigned long entries = size >> PAGE_SHIFT;
144 if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
145 ((vaddr + size) < vaddr))
148 mem = mm_iommu_get(container->mm, vaddr, entries);
150 list_for_each_entry(tcemem, &container->prereg_list, next) {
151 if (tcemem->mem == mem) {
157 ret = mm_iommu_new(container->mm, vaddr, entries, &mem);
162 tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
169 list_add(&tcemem->next, &container->prereg_list);
171 container->enabled = true;
176 mm_iommu_put(container->mm, mem);
180 static bool tce_page_is_contained(struct mm_struct *mm, unsigned long hpa,
181 unsigned int it_page_shift)
184 unsigned long size = 0;
186 if (mm_iommu_is_devmem(mm, hpa, it_page_shift, &size))
187 return size == (1UL << it_page_shift);
189 page = pfn_to_page(hpa >> PAGE_SHIFT);
191 * Check that the TCE table granularity is not bigger than the size of
192 * a page we just found. Otherwise the hardware can get access to
193 * a bigger memory chunk than it should.
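 *
 * Illustrative example for regular memory: with a 16MB TCE page size
 * (it_page_shift == 24) and 64KB system pages, the check below only
 * passes when the pinned page belongs to a compound page (such as a
 * 16MB hugepage) of at least that size.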
195 return page_shift(compound_head(page)) >= it_page_shift;
198 static inline bool tce_groups_attached(struct tce_container *container)
200 return !list_empty(&container->group_list);
203 static long tce_iommu_find_table(struct tce_container *container,
204 phys_addr_t ioba, struct iommu_table **ptbl)
208 for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
209 struct iommu_table *tbl = container->tables[i];
212 unsigned long entry = ioba >> tbl->it_page_shift;
213 unsigned long start = tbl->it_offset;
214 unsigned long end = start + tbl->it_size;
216 if ((start <= entry) && (entry < end)) {
226 static int tce_iommu_find_free_table(struct tce_container *container)
230 for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
231 if (!container->tables[i])
238 static int tce_iommu_enable(struct tce_container *container)
241 unsigned long locked;
242 struct iommu_table_group *table_group;
243 struct tce_iommu_group *tcegrp;
245 if (container->enabled)
249 * When userspace pages are mapped into the IOMMU, they are effectively
250 * locked memory, so, theoretically, we need to update the accounting
251 * of locked pages on each map and unmap. For powerpc, the map unmap
252 * paths can be very hot, though, and the accounting would kill
253 * performance, especially since it would be difficult, if not
254 * impossible, to handle the accounting in real mode only.
256 * To address that, rather than precisely accounting every page, we
257 * instead account for a worst case on locked memory when the iommu is
258 * enabled and disabled. The worst case upper bound on locked memory
259 * is the size of the whole iommu window, which is usually relatively
260 * small (compared to total memory sizes) on POWER hardware.
262 * Also, we don't have a nice way to fail on H_PUT_TCE due to ulimits;
263 * that would effectively kill the guest at random points, so it is much
264 * better to enforce the limit based on the maximum the guest can map.
266 * Unfortunately at the moment it counts whole tables, no matter how
267 * much memory the guest has. I.e. for a 4GB guest and 4 IOMMU groups,
268 * each with a 2GB DMA window, 8GB will be counted here. The reason for
269 * this is that we cannot tell here the amount of RAM used by the guest
270 * as this information is only available from KVM and VFIO is
273 * So we do not allow enabling a container without a group attached
274 * as there is no way to know how much we should increment
275 * the locked_vm counter.
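 *
 * (Illustrative arithmetic for the code below: a 2GB tce32 window with
 * 64KB system pages accounts 0x80000000 >> 16 = 32768 pages against the
 * locked_vm limit, no matter how much of the window ever gets mapped.)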
277 if (!tce_groups_attached(container))
280 tcegrp = list_first_entry(&container->group_list,
281 struct tce_iommu_group, next);
282 table_group = iommu_group_get_iommudata(tcegrp->grp);
286 if (!table_group->tce32_size)
289 ret = tce_iommu_mm_set(container);
293 locked = table_group->tce32_size >> PAGE_SHIFT;
294 ret = account_locked_vm(container->mm, locked, true);
298 container->locked_pages = locked;
300 container->enabled = true;
305 static void tce_iommu_disable(struct tce_container *container)
307 if (!container->enabled)
310 container->enabled = false;
312 BUG_ON(!container->mm);
313 account_locked_vm(container->mm, container->locked_pages, false);
316 static void *tce_iommu_open(unsigned long arg)
318 struct tce_container *container;
320 if ((arg != VFIO_SPAPR_TCE_IOMMU) && (arg != VFIO_SPAPR_TCE_v2_IOMMU)) {
321 pr_err("tce_vfio: Wrong IOMMU type\n");
322 return ERR_PTR(-EINVAL);
325 container = kzalloc(sizeof(*container), GFP_KERNEL);
327 return ERR_PTR(-ENOMEM);
329 mutex_init(&container->lock);
330 INIT_LIST_HEAD_RCU(&container->group_list);
331 INIT_LIST_HEAD_RCU(&container->prereg_list);
333 container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;
338 static int tce_iommu_clear(struct tce_container *container,
339 struct iommu_table *tbl,
340 unsigned long entry, unsigned long pages);
341 static void tce_iommu_free_table(struct tce_container *container,
342 struct iommu_table *tbl);
344 static void tce_iommu_release(void *iommu_data)
346 struct tce_container *container = iommu_data;
347 struct tce_iommu_group *tcegrp;
348 struct tce_iommu_prereg *tcemem, *tmtmp;
351 while (tce_groups_attached(container)) {
352 tcegrp = list_first_entry(&container->group_list,
353 struct tce_iommu_group, next);
354 tce_iommu_detach_group(iommu_data, tcegrp->grp);
358 * If VFIO created a table, it was not disposed of
359 * by tce_iommu_detach_group(), so do it now.
361 for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
362 struct iommu_table *tbl = container->tables[i];
367 tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
368 tce_iommu_free_table(container, tbl);
371 list_for_each_entry_safe(tcemem, tmtmp, &container->prereg_list, next)
372 WARN_ON(tce_iommu_prereg_free(container, tcemem));
374 tce_iommu_disable(container);
376 mmdrop(container->mm);
377 mutex_destroy(&container->lock);
382 static void tce_iommu_unuse_page(unsigned long hpa)
386 page = pfn_to_page(hpa >> PAGE_SHIFT);
387 unpin_user_page(page);
390 static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
391 unsigned long tce, unsigned long shift,
392 unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
395 struct mm_iommu_table_group_mem_t *mem;
397 mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift);
401 ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa);
410 static void tce_iommu_unuse_page_v2(struct tce_container *container,
411 struct iommu_table *tbl, unsigned long entry)
413 struct mm_iommu_table_group_mem_t *mem = NULL;
415 unsigned long hpa = 0;
416 __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
421 ret = tce_iommu_prereg_ua_to_hpa(container, be64_to_cpu(*pua),
422 tbl->it_page_shift, &hpa, &mem);
424 pr_debug("%s: tce %llx at #%lx was not cached, ret=%d\n",
425 __func__, be64_to_cpu(*pua), entry, ret);
427 mm_iommu_mapped_dec(mem);
429 *pua = cpu_to_be64(0);
432 static int tce_iommu_clear(struct tce_container *container,
433 struct iommu_table *tbl,
434 unsigned long entry, unsigned long pages)
436 unsigned long oldhpa;
438 enum dma_data_direction direction;
439 unsigned long lastentry = entry + pages, firstentry = entry;
441 for ( ; entry < lastentry; ++entry) {
442 if (tbl->it_indirect_levels && tbl->it_userspace) {
444 * For multilevel tables, we can take a shortcut here
445 * and skip some TCEs: the userspace address cache is
446 * a mirror of the real TCE table, so if it is missing
447 * some indirect levels, the hardware table does not
448 * have them allocated either and therefore does not
449 * require updating.
451 __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl,
454 /* align to level_size which is power of two */
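/*
 * Illustrative example: with it_level_size == 256, entry 0x103 becomes
 * 0x1ff in the statement below, so the loop's ++entry resumes at 0x200,
 * the first entry of the next level block.
 */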
455 entry |= tbl->it_level_size - 1;
462 direction = DMA_NONE;
464 ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry, &oldhpa,
469 if (direction == DMA_NONE)
473 tce_iommu_unuse_page_v2(container, tbl, entry);
477 tce_iommu_unuse_page(oldhpa);
480 iommu_tce_kill(tbl, firstentry, pages);
485 static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
487 struct page *page = NULL;
488 enum dma_data_direction direction = iommu_tce_direction(tce);
490 if (pin_user_pages_fast(tce & PAGE_MASK, 1,
491 direction != DMA_TO_DEVICE ? FOLL_WRITE : 0,
495 *hpa = __pa((unsigned long) page_address(page));
500 static long tce_iommu_build(struct tce_container *container,
501 struct iommu_table *tbl,
502 unsigned long entry, unsigned long tce, unsigned long pages,
503 enum dma_data_direction direction)
507 enum dma_data_direction dirtmp;
509 for (i = 0; i < pages; ++i) {
510 unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
512 ret = tce_iommu_use_page(tce, &hpa);
516 if (!tce_page_is_contained(container->mm, hpa,
517 tbl->it_page_shift)) {
524 ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
527 tce_iommu_unuse_page(hpa);
528 pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
529 __func__, entry << tbl->it_page_shift,
534 if (dirtmp != DMA_NONE)
535 tce_iommu_unuse_page(hpa);
537 tce += IOMMU_PAGE_SIZE(tbl);
541 tce_iommu_clear(container, tbl, entry, i);
543 iommu_tce_kill(tbl, entry, pages);
548 static long tce_iommu_build_v2(struct tce_container *container,
549 struct iommu_table *tbl,
550 unsigned long entry, unsigned long tce, unsigned long pages,
551 enum dma_data_direction direction)
555 enum dma_data_direction dirtmp;
557 for (i = 0; i < pages; ++i) {
558 struct mm_iommu_table_group_mem_t *mem = NULL;
559 __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry + i);
561 ret = tce_iommu_prereg_ua_to_hpa(container,
562 tce, tbl->it_page_shift, &hpa, &mem);
566 if (!tce_page_is_contained(container->mm, hpa,
567 tbl->it_page_shift)) {
572 /* Preserve offset within IOMMU page */
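/*
 * Illustrative example: with 4KB IOMMU pages (it_page_shift == 12) and
 * 64KB system pages, bits 12..15 of the userspace address end up in hpa
 * so the TCE points at the right 4KB chunk of the pinned 64KB page.
 */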
573 hpa |= tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
576 /* The registered region is being unregistered */
577 if (mm_iommu_mapped_inc(mem))
580 ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
583 /* dirtmp cannot be DMA_NONE here */
584 tce_iommu_unuse_page_v2(container, tbl, entry + i);
585 pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
586 __func__, entry << tbl->it_page_shift,
591 if (dirtmp != DMA_NONE)
592 tce_iommu_unuse_page_v2(container, tbl, entry + i);
594 *pua = cpu_to_be64(tce);
596 tce += IOMMU_PAGE_SIZE(tbl);
600 tce_iommu_clear(container, tbl, entry, i);
602 iommu_tce_kill(tbl, entry, pages);
607 static long tce_iommu_create_table(struct tce_container *container,
608 struct iommu_table_group *table_group,
613 struct iommu_table **ptbl)
615 long ret, table_size;
617 table_size = table_group->ops->get_table_size(page_shift, window_size,
622 ret = account_locked_vm(container->mm, table_size >> PAGE_SHIFT, true);
626 ret = table_group->ops->create_table(table_group, num,
627 page_shift, window_size, levels, ptbl);
629 WARN_ON(!ret && !(*ptbl)->it_ops->free);
630 WARN_ON(!ret && ((*ptbl)->it_allocated_size > table_size));
635 static void tce_iommu_free_table(struct tce_container *container,
636 struct iommu_table *tbl)
638 unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;
640 iommu_tce_table_put(tbl);
641 account_locked_vm(container->mm, pages, false);
644 static long tce_iommu_create_window(struct tce_container *container,
645 __u32 page_shift, __u64 window_size, __u32 levels,
648 struct tce_iommu_group *tcegrp;
649 struct iommu_table_group *table_group;
650 struct iommu_table *tbl = NULL;
653 num = tce_iommu_find_free_table(container);
657 /* Get the first group for ops::create_table */
658 tcegrp = list_first_entry(&container->group_list,
659 struct tce_iommu_group, next);
660 table_group = iommu_group_get_iommudata(tcegrp->grp);
664 if (!(table_group->pgsizes & (1ULL << page_shift)))
667 if (!table_group->ops->set_window || !table_group->ops->unset_window ||
668 !table_group->ops->get_table_size ||
669 !table_group->ops->create_table)
672 /* Create TCE table */
673 ret = tce_iommu_create_table(container, table_group, num,
674 page_shift, window_size, levels, &tbl);
678 BUG_ON(!tbl->it_ops->free);
681 * Program the table to every group.
682 * Groups have been tested for compatibility at attach time.
684 list_for_each_entry(tcegrp, &container->group_list, next) {
685 table_group = iommu_group_get_iommudata(tcegrp->grp);
687 ret = table_group->ops->set_window(table_group, num, tbl);
692 container->tables[num] = tbl;
694 /* Return start address assigned by platform in create_table() */
695 *start_addr = tbl->it_offset << tbl->it_page_shift;
700 list_for_each_entry(tcegrp, &container->group_list, next) {
701 table_group = iommu_group_get_iommudata(tcegrp->grp);
702 table_group->ops->unset_window(table_group, num);
704 tce_iommu_free_table(container, tbl);
709 static long tce_iommu_remove_window(struct tce_container *container,
712 struct iommu_table_group *table_group = NULL;
713 struct iommu_table *tbl;
714 struct tce_iommu_group *tcegrp;
717 num = tce_iommu_find_table(container, start_addr, &tbl);
721 BUG_ON(!tbl->it_size);
723 /* Detach groups from IOMMUs */
724 list_for_each_entry(tcegrp, &container->group_list, next) {
725 table_group = iommu_group_get_iommudata(tcegrp->grp);
728 * SPAPR TCE IOMMU exposes the default DMA window to
729 * the guest via dma32_window_start/size of
730 * VFIO_IOMMU_SPAPR_TCE_GET_INFO. Some platforms allow
731 * userspace to remove this window, some do not, so
732 * here we check for the platform capability.
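 *
 * (Illustrative only: userspace reaches this path via
 * VFIO_IOMMU_SPAPR_TCE_REMOVE with start_addr set to the bus address of
 * the window, e.g. the dma32_window_start reported by
 * VFIO_IOMMU_SPAPR_TCE_GET_INFO.)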
734 if (!table_group->ops || !table_group->ops->unset_window)
737 table_group->ops->unset_window(table_group, num);
741 tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
742 tce_iommu_free_table(container, tbl);
743 container->tables[num] = NULL;
748 static long tce_iommu_create_default_window(struct tce_container *container)
751 __u64 start_addr = 0;
752 struct tce_iommu_group *tcegrp;
753 struct iommu_table_group *table_group;
755 if (!container->def_window_pending)
758 if (!tce_groups_attached(container))
761 tcegrp = list_first_entry(&container->group_list,
762 struct tce_iommu_group, next);
763 table_group = iommu_group_get_iommudata(tcegrp->grp);
767 ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
768 table_group->tce32_size, 1, &start_addr);
769 WARN_ON_ONCE(!ret && start_addr);
772 container->def_window_pending = false;
777 static long vfio_spapr_ioctl_eeh_pe_op(struct iommu_group *group,
781 struct vfio_eeh_pe_op op;
784 pe = eeh_iommu_group_to_pe(group);
788 minsz = offsetofend(struct vfio_eeh_pe_op, op);
789 if (copy_from_user(&op, (void __user *)arg, minsz))
791 if (op.argsz < minsz || op.flags)
795 case VFIO_EEH_PE_DISABLE:
796 return eeh_pe_set_option(pe, EEH_OPT_DISABLE);
797 case VFIO_EEH_PE_ENABLE:
798 return eeh_pe_set_option(pe, EEH_OPT_ENABLE);
799 case VFIO_EEH_PE_UNFREEZE_IO:
800 return eeh_pe_set_option(pe, EEH_OPT_THAW_MMIO);
801 case VFIO_EEH_PE_UNFREEZE_DMA:
802 return eeh_pe_set_option(pe, EEH_OPT_THAW_DMA);
803 case VFIO_EEH_PE_GET_STATE:
804 return eeh_pe_get_state(pe);
806 case VFIO_EEH_PE_RESET_DEACTIVATE:
807 return eeh_pe_reset(pe, EEH_RESET_DEACTIVATE, true);
808 case VFIO_EEH_PE_RESET_HOT:
809 return eeh_pe_reset(pe, EEH_RESET_HOT, true);
810 case VFIO_EEH_PE_RESET_FUNDAMENTAL:
811 return eeh_pe_reset(pe, EEH_RESET_FUNDAMENTAL, true);
812 case VFIO_EEH_PE_CONFIGURE:
813 return eeh_pe_configure(pe);
814 case VFIO_EEH_PE_INJECT_ERR:
815 minsz = offsetofend(struct vfio_eeh_pe_op, err.mask);
816 if (op.argsz < minsz)
818 if (copy_from_user(&op, (void __user *)arg, minsz))
821 return eeh_pe_inject_err(pe, op.err.type, op.err.func,
822 op.err.addr, op.err.mask);
828 static long tce_iommu_ioctl(void *iommu_data,
829 unsigned int cmd, unsigned long arg)
831 struct tce_container *container = iommu_data;
832 unsigned long minsz, ddwsz;
836 case VFIO_CHECK_EXTENSION:
838 case VFIO_SPAPR_TCE_IOMMU:
839 case VFIO_SPAPR_TCE_v2_IOMMU:
842 return eeh_enabled();
849 * Sanity check to prevent one userspace from manipulating
850 * another userspace's mm.
853 if (container->mm && container->mm != current->mm)
857 case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
858 struct vfio_iommu_spapr_tce_info info;
859 struct tce_iommu_group *tcegrp;
860 struct iommu_table_group *table_group;
862 if (!tce_groups_attached(container))
865 tcegrp = list_first_entry(&container->group_list,
866 struct tce_iommu_group, next);
867 table_group = iommu_group_get_iommudata(tcegrp->grp);
872 minsz = offsetofend(struct vfio_iommu_spapr_tce_info,
875 if (copy_from_user(&info, (void __user *)arg, minsz))
878 if (info.argsz < minsz)
881 info.dma32_window_start = table_group->tce32_start;
882 info.dma32_window_size = table_group->tce32_size;
884 memset(&info.ddw, 0, sizeof(info.ddw));
886 if (table_group->max_dynamic_windows_supported &&
888 info.flags |= VFIO_IOMMU_SPAPR_INFO_DDW;
889 info.ddw.pgsizes = table_group->pgsizes;
890 info.ddw.max_dynamic_windows_supported =
891 table_group->max_dynamic_windows_supported;
892 info.ddw.levels = table_group->max_levels;
895 ddwsz = offsetofend(struct vfio_iommu_spapr_tce_info, ddw);
897 if (info.argsz >= ddwsz)
900 if (copy_to_user((void __user *)arg, &info, minsz))
905 case VFIO_IOMMU_MAP_DMA: {
906 struct vfio_iommu_type1_dma_map param;
907 struct iommu_table *tbl = NULL;
909 enum dma_data_direction direction;
911 if (!container->enabled)
914 minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);
916 if (copy_from_user(&param, (void __user *)arg, minsz))
919 if (param.argsz < minsz)
922 if (param.flags & ~(VFIO_DMA_MAP_FLAG_READ |
923 VFIO_DMA_MAP_FLAG_WRITE))
926 ret = tce_iommu_create_default_window(container);
930 num = tce_iommu_find_table(container, param.iova, &tbl);
934 if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
935 (param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
938 /* iova is checked by the IOMMU API */
939 if (param.flags & VFIO_DMA_MAP_FLAG_READ) {
940 if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
941 direction = DMA_BIDIRECTIONAL;
943 direction = DMA_TO_DEVICE;
945 if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
946 direction = DMA_FROM_DEVICE;
951 ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr);
956 ret = tce_iommu_build_v2(container, tbl,
957 param.iova >> tbl->it_page_shift,
959 param.size >> tbl->it_page_shift,
962 ret = tce_iommu_build(container, tbl,
963 param.iova >> tbl->it_page_shift,
965 param.size >> tbl->it_page_shift,
968 iommu_flush_tce(tbl);
972 case VFIO_IOMMU_UNMAP_DMA: {
973 struct vfio_iommu_type1_dma_unmap param;
974 struct iommu_table *tbl = NULL;
977 if (!container->enabled)
980 minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,
983 if (copy_from_user(&param, (void __user *)arg, minsz))
986 if (param.argsz < minsz)
989 /* No flag is supported now */
993 ret = tce_iommu_create_default_window(container);
997 num = tce_iommu_find_table(container, param.iova, &tbl);
1001 if (param.size & ~IOMMU_PAGE_MASK(tbl))
1004 ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
1005 param.size >> tbl->it_page_shift);
1009 ret = tce_iommu_clear(container, tbl,
1010 param.iova >> tbl->it_page_shift,
1011 param.size >> tbl->it_page_shift);
1012 iommu_flush_tce(tbl);
1016 case VFIO_IOMMU_SPAPR_REGISTER_MEMORY: {
1017 struct vfio_iommu_spapr_register_memory param;
1022 minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
1025 ret = tce_iommu_mm_set(container);
1029 if (copy_from_user(&param, (void __user *)arg, minsz))
1032 if (param.argsz < minsz)
1035 /* No flag is supported now */
1039 mutex_lock(&container->lock);
1040 ret = tce_iommu_register_pages(container, param.vaddr,
1042 mutex_unlock(&container->lock);
1046 case VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY: {
1047 struct vfio_iommu_spapr_register_memory param;
1055 minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
1058 if (copy_from_user(&param, (void __user *)arg, minsz))
1061 if (param.argsz < minsz)
1064 /* No flag is supported now */
1068 mutex_lock(&container->lock);
1069 ret = tce_iommu_unregister_pages(container, param.vaddr,
1071 mutex_unlock(&container->lock);
1075 case VFIO_IOMMU_ENABLE:
1079 mutex_lock(&container->lock);
1080 ret = tce_iommu_enable(container);
1081 mutex_unlock(&container->lock);
1085 case VFIO_IOMMU_DISABLE:
1089 mutex_lock(&container->lock);
1090 tce_iommu_disable(container);
1091 mutex_unlock(&container->lock);
1094 case VFIO_EEH_PE_OP: {
1095 struct tce_iommu_group *tcegrp;
1098 list_for_each_entry(tcegrp, &container->group_list, next) {
1099 ret = vfio_spapr_ioctl_eeh_pe_op(tcegrp->grp, arg);
1106 case VFIO_IOMMU_SPAPR_TCE_CREATE: {
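/*
 * Illustrative userspace request for this path (placeholder values, not
 * from the original source):
 *
 *	struct vfio_iommu_spapr_tce_create create = {
 *		.argsz = sizeof(create),
 *		.page_shift = 16,
 *		.window_size = 1ULL << 40,
 *		.levels = 1,
 *	};
 *	ioctl(cfd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
 *
 * On success, create.start_addr holds the bus address assigned to the
 * new window.
 */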
1107 struct vfio_iommu_spapr_tce_create create;
1112 ret = tce_iommu_mm_set(container);
1116 if (!tce_groups_attached(container))
1119 minsz = offsetofend(struct vfio_iommu_spapr_tce_create,
1122 if (copy_from_user(&create, (void __user *)arg, minsz))
1125 if (create.argsz < minsz)
1131 mutex_lock(&container->lock);
1133 ret = tce_iommu_create_default_window(container);
1135 ret = tce_iommu_create_window(container,
1137 create.window_size, create.levels,
1138 &create.start_addr);
1140 mutex_unlock(&container->lock);
1142 if (!ret && copy_to_user((void __user *)arg, &create, minsz))
1147 case VFIO_IOMMU_SPAPR_TCE_REMOVE: {
1148 struct vfio_iommu_spapr_tce_remove remove;
1153 ret = tce_iommu_mm_set(container);
1157 if (!tce_groups_attached(container))
1160 minsz = offsetofend(struct vfio_iommu_spapr_tce_remove,
1163 if (copy_from_user(&remove, (void __user *)arg, minsz))
1166 if (remove.argsz < minsz)
1172 if (container->def_window_pending && !remove.start_addr) {
1173 container->def_window_pending = false;
1177 mutex_lock(&container->lock);
1179 ret = tce_iommu_remove_window(container, remove.start_addr);
1181 mutex_unlock(&container->lock);
1190 static void tce_iommu_release_ownership(struct tce_container *container,
1191 struct iommu_table_group *table_group)
1195 if (!table_group->ops->unset_window) {
1200 for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
1201 if (container->tables[i])
1202 table_group->ops->unset_window(table_group, i);
1205 static long tce_iommu_take_ownership(struct tce_container *container,
1206 struct iommu_table_group *table_group)
1210 /* Set all windows to the new group */
1211 for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
1212 struct iommu_table *tbl = container->tables[i];
1217 ret = table_group->ops->set_window(table_group, i, tbl);
1225 for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
1226 table_group->ops->unset_window(table_group, i);
1231 static int tce_iommu_attach_group(void *iommu_data,
1232 struct iommu_group *iommu_group, enum vfio_group_type type)
1235 struct tce_container *container = iommu_data;
1236 struct iommu_table_group *table_group;
1237 struct tce_iommu_group *tcegrp = NULL;
1239 if (type == VFIO_EMULATED_IOMMU)
1242 mutex_lock(&container->lock);
1244 /* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
1245 iommu_group_id(iommu_group), iommu_group); */
1246 table_group = iommu_group_get_iommudata(iommu_group);
1252 /* v2 requires full support of dynamic DMA windows */
1253 if (container->v2 && table_group->max_dynamic_windows_supported == 0) {
1258 /* v1 reuses TCE tables and does not share them among PEs */
1259 if (!container->v2 && tce_groups_attached(container)) {
1265 * Check if new group has the same iommu_table_group_ops
1268 list_for_each_entry(tcegrp, &container->group_list, next) {
1269 struct iommu_table_group *table_group_tmp;
1271 if (tcegrp->grp == iommu_group) {
1272 pr_warn("tce_vfio: Group %d is already attached\n",
1273 iommu_group_id(iommu_group));
1277 table_group_tmp = iommu_group_get_iommudata(tcegrp->grp);
1278 if (table_group_tmp->ops->create_table !=
1279 table_group->ops->create_table) {
1280 pr_warn("tce_vfio: Group %d is incompatible with group %d\n",
1281 iommu_group_id(iommu_group),
1282 iommu_group_id(tcegrp->grp));
1288 tcegrp = kzalloc(sizeof(*tcegrp), GFP_KERNEL);
1294 ret = tce_iommu_take_ownership(container, table_group);
1295 if (!tce_groups_attached(container) && !container->tables[0])
1296 container->def_window_pending = true;
1299 tcegrp->grp = iommu_group;
1300 list_add(&tcegrp->next, &container->group_list);
1307 mutex_unlock(&container->lock);
1312 static void tce_iommu_detach_group(void *iommu_data,
1313 struct iommu_group *iommu_group)
1315 struct tce_container *container = iommu_data;
1316 struct iommu_table_group *table_group;
1318 struct tce_iommu_group *tcegrp;
1320 mutex_lock(&container->lock);
1322 list_for_each_entry(tcegrp, &container->group_list, next) {
1323 if (tcegrp->grp == iommu_group) {
1330 pr_warn("tce_vfio: detaching unattached group #%u\n",
1331 iommu_group_id(iommu_group));
1335 list_del(&tcegrp->next);
1338 table_group = iommu_group_get_iommudata(iommu_group);
1339 BUG_ON(!table_group);
1341 tce_iommu_release_ownership(container, table_group);
1344 mutex_unlock(&container->lock);
1347 static const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
1348 .name = "iommu-vfio-powerpc",
1349 .owner = THIS_MODULE,
1350 .open = tce_iommu_open,
1351 .release = tce_iommu_release,
1352 .ioctl = tce_iommu_ioctl,
1353 .attach_group = tce_iommu_attach_group,
1354 .detach_group = tce_iommu_detach_group,
1357 static int __init tce_iommu_init(void)
1359 return vfio_register_iommu_driver(&tce_iommu_driver_ops);
1362 static void __exit tce_iommu_cleanup(void)
1364 vfio_unregister_iommu_driver(&tce_iommu_driver_ops);
1367 module_init(tce_iommu_init);
1368 module_exit(tce_iommu_cleanup);
1370 MODULE_VERSION(DRIVER_VERSION);
1371 MODULE_LICENSE("GPL v2");
1372 MODULE_AUTHOR(DRIVER_AUTHOR);
1373 MODULE_DESCRIPTION(DRIVER_DESC);