/*
 * VFIO: IOMMU DMA mapping support for TCE on POWER
 *
 * Copyright (C) 2013 IBM Corp. All rights reserved.
 *     Author: Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio_iommu_type1.c:
 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/vfio.h>
#include <linux/vmalloc.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

#define DRIVER_VERSION  "0.1"
#define DRIVER_AUTHOR   "aik@ozlabs.ru"
#define DRIVER_DESC     "VFIO IOMMU SPAPR TCE"
static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group);
static long try_increment_locked_vm(struct mm_struct *mm, long npages)
{
	long ret = 0, locked, lock_limit;

	if (WARN_ON_ONCE(!mm))
		return -EPERM;

	if (!npages)
		return 0;

	down_write(&mm->mmap_sem);
	locked = mm->locked_vm + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK))
		ret = -ENOMEM;
	else
		mm->locked_vm += npages;

	pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK),
			ret ? " - exceeded" : "");

	up_write(&mm->mmap_sem);

	return ret;
}
static void decrement_locked_vm(struct mm_struct *mm, long npages)
{
	if (!mm || !npages)
		return;

	down_write(&mm->mmap_sem);
	if (WARN_ON_ONCE(npages > mm->locked_vm))
		npages = mm->locked_vm;
	mm->locked_vm -= npages;
	pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK));
	up_write(&mm->mmap_sem);
}
/*
 * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
 *
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */
struct tce_iommu_group {
	struct list_head next;
	struct iommu_group *grp;
};

/*
 * A container needs to remember which preregistered region it has
 * referenced to do proper cleanup at the userspace process exit.
 */
struct tce_iommu_prereg {
	struct list_head next;
	struct mm_iommu_table_group_mem_t *mem;
};
/*
 * The container descriptor supports only a single group per container.
 * Required by the API as the container is not supplied with the IOMMU group
 * at the moment of initialization.
 */
struct tce_container {
	struct mutex lock;
	bool enabled;
	bool v2;
	bool def_window_pending;
	unsigned long locked_pages;
	struct mm_struct *mm;
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
	struct list_head group_list;
	struct list_head prereg_list;
};
static long tce_iommu_mm_set(struct tce_container *container)
{
	if (container->mm) {
		if (container->mm == current->mm)
			return 0;
		return -EPERM;
	}
	BUG_ON(!current->mm);
	container->mm = current->mm;
	atomic_inc(&container->mm->mm_count);

	return 0;
}
static long tce_iommu_prereg_free(struct tce_container *container,
		struct tce_iommu_prereg *tcemem)
{
	long ret;

	ret = mm_iommu_put(container->mm, tcemem->mem);
	if (ret)
		return ret;

	list_del(&tcemem->next);
	kfree(tcemem);

	return 0;
}
static long tce_iommu_unregister_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	struct mm_iommu_table_group_mem_t *mem;
	struct tce_iommu_prereg *tcemem;
	bool found = false;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
		return -EINVAL;

	mem = mm_iommu_find(container->mm, vaddr, size >> PAGE_SHIFT);
	if (!mem)
		return -ENOENT;

	list_for_each_entry(tcemem, &container->prereg_list, next) {
		if (tcemem->mem == mem) {
			found = true;
			break;
		}
	}

	if (!found)
		return -ENOENT;

	return tce_iommu_prereg_free(container, tcemem);
}
static long tce_iommu_register_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem = NULL;
	struct tce_iommu_prereg *tcemem;
	unsigned long entries = size >> PAGE_SHIFT;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
			((vaddr + size) < vaddr))
		return -EINVAL;

	mem = mm_iommu_find(container->mm, vaddr, entries);
	if (mem) {
		list_for_each_entry(tcemem, &container->prereg_list, next) {
			if (tcemem->mem == mem)
				return -EBUSY;
		}
	}

	ret = mm_iommu_get(container->mm, vaddr, entries, &mem);
	if (ret)
		return ret;

	tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
	if (!tcemem) {
		mm_iommu_put(container->mm, mem);
		return -ENOMEM;
	}

	tcemem->mem = mem;
	list_add(&tcemem->next, &container->prereg_list);

	container->enabled = true;

	return 0;
}
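/*
 * Illustrative userspace calls for the register/unregister handlers above
 * (a sketch; "vaddr" and "ram_size" are made-up, page-aligned values):
 *
 *	struct vfio_iommu_spapr_register_memory reg = {
 *		.argsz = sizeof(reg),
 *		.flags = 0,
 *		.vaddr = vaddr,
 *		.size = ram_size,
 *	};
 *	ioctl(container, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
 *	...
 *	ioctl(container, VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY, &reg);
 */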
static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl,
		struct mm_struct *mm)
{
	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
			tbl->it_size, PAGE_SIZE);
	unsigned long *uas;
	long ret;

	BUG_ON(tbl->it_userspace);

	ret = try_increment_locked_vm(mm, cb >> PAGE_SHIFT);
	if (ret)
		return ret;

	uas = vzalloc(cb);
	if (!uas) {
		decrement_locked_vm(mm, cb >> PAGE_SHIFT);
		return -ENOMEM;
	}
	tbl->it_userspace = uas;

	return 0;
}
static void tce_iommu_userspace_view_free(struct iommu_table *tbl,
		struct mm_struct *mm)
{
	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
			tbl->it_size, PAGE_SIZE);

	if (!tbl->it_userspace)
		return;

	vfree(tbl->it_userspace);
	tbl->it_userspace = NULL;
	decrement_locked_vm(mm, cb >> PAGE_SHIFT);
}
static bool tce_page_is_contained(struct page *page, unsigned page_shift)
{
	/*
	 * Check that the TCE table granularity is not bigger than the size of
	 * a page we just found. Otherwise the hardware can get access to
	 * a bigger memory chunk than it should.
	 */
	return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift;
}
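/*
 * Worked example for the check above (illustrative numbers): with 64K host
 * pages (PAGE_SHIFT = 16) and a 16MB IOMMU page size (page_shift = 24), a
 * pinned page passes only if it belongs to a compound page of order
 * 24 - 16 = 8 or higher, i.e. a huge page of at least 16MB.
 */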
static inline bool tce_groups_attached(struct tce_container *container)
{
	return !list_empty(&container->group_list);
}
static long tce_iommu_find_table(struct tce_container *container,
		phys_addr_t ioba, struct iommu_table **ptbl)
{
	long i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (tbl) {
			unsigned long entry = ioba >> tbl->it_page_shift;
			unsigned long start = tbl->it_offset;
			unsigned long end = start + tbl->it_size;

			if ((start <= entry) && (entry < end)) {
				*ptbl = tbl;
				return i;
			}
		}
	}

	return -1;
}
static int tce_iommu_find_free_table(struct tce_container *container)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		if (!container->tables[i])
			return i;
	}

	return -ENOSPC;
}
static int tce_iommu_enable(struct tce_container *container)
{
	int ret = 0;
	unsigned long locked;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp;

	if (container->enabled)
		return -EBUSY;

	/*
	 * When userspace pages are mapped into the IOMMU, they are effectively
	 * locked memory, so, theoretically, we need to update the accounting
	 * of locked pages on each map and unmap. For powerpc, the map/unmap
	 * paths can be very hot, though, and the accounting would kill
	 * performance, especially since it would be difficult or impossible
	 * to handle the accounting in real mode only.
	 *
	 * To address that, rather than precisely accounting every page, we
	 * instead account for a worst case on locked memory when the iommu is
	 * enabled and disabled. The worst case upper bound on locked memory
	 * is the size of the whole iommu window, which is usually relatively
	 * small (compared to total memory sizes) on POWER hardware.
	 *
	 * Also we don't have a nice way to fail on H_PUT_TCE due to ulimits,
	 * as that would effectively kill the guest at random points; it is
	 * much better to enforce the limit based on the maximum that the
	 * guest can map.
	 *
	 * Unfortunately at the moment it counts whole tables, no matter how
	 * much memory the guest has. I.e. for a 4GB guest and 4 IOMMU groups,
	 * each with a 2GB DMA window, 8GB will be counted here. The reason for
	 * this is that we cannot tell here the amount of RAM used by the guest
	 * as this information is only available from KVM and VFIO is
	 * KVM agnostic.
	 *
	 * So we do not allow enabling a container without a group attached
	 * as there is no way to know how much we should increment
	 * the locked_vm counter.
	 */
	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	if (!table_group->tce32_size)
		return -EPERM;

	ret = tce_iommu_mm_set(container);
	if (ret)
		return ret;

	locked = table_group->tce32_size >> PAGE_SHIFT;
	ret = try_increment_locked_vm(container->mm, locked);
	if (ret)
		return ret;

	container->locked_pages = locked;

	container->enabled = true;

	return ret;
}
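/*
 * Worked example of the worst-case accounting above (illustrative numbers):
 * with a 2GB default 32-bit DMA window (tce32_size = 0x80000000) and 64K
 * host pages (PAGE_SHIFT = 16), enabling the container charges
 * 0x80000000 >> 16 = 32768 pages against RLIMIT_MEMLOCK, regardless of how
 * many pages are actually mapped while the container stays enabled.
 */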
static void tce_iommu_disable(struct tce_container *container)
{
	if (!container->enabled)
		return;

	container->enabled = false;

	BUG_ON(!container->mm);
	decrement_locked_vm(container->mm, container->locked_pages);
}
static void *tce_iommu_open(unsigned long arg)
{
	struct tce_container *container;

	if ((arg != VFIO_SPAPR_TCE_IOMMU) && (arg != VFIO_SPAPR_TCE_v2_IOMMU)) {
		pr_err("tce_vfio: Wrong IOMMU type\n");
		return ERR_PTR(-EINVAL);
	}

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	mutex_init(&container->lock);
	INIT_LIST_HEAD_RCU(&container->group_list);
	INIT_LIST_HEAD_RCU(&container->prereg_list);

	container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;

	return container;
}
static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages);
static void tce_iommu_free_table(struct tce_container *container,
		struct iommu_table *tbl);
static void tce_iommu_release(void *iommu_data)
{
	struct tce_container *container = iommu_data;
	struct tce_iommu_group *tcegrp;
	struct tce_iommu_prereg *tcemem, *tmtmp;
	long i;

	while (tce_groups_attached(container)) {
		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		tce_iommu_detach_group(iommu_data, tcegrp->grp);
	}

	/*
	 * If VFIO created a table, it was not disposed
	 * by tce_iommu_detach_group() so do it now.
	 */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_free_table(container, tbl);
	}

	list_for_each_entry_safe(tcemem, tmtmp, &container->prereg_list, next)
		WARN_ON(tce_iommu_prereg_free(container, tcemem));

	tce_iommu_disable(container);
	if (container->mm)
		mmdrop(container->mm);
	mutex_destroy(&container->lock);

	kfree(container);
}
static void tce_iommu_unuse_page(struct tce_container *container,
		unsigned long hpa)
{
	struct page *page;

	page = pfn_to_page(hpa >> PAGE_SHIFT);
	put_page(page);
}
static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
		unsigned long tce, unsigned long size,
		unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem;

	mem = mm_iommu_lookup(container->mm, tce, size);
	if (!mem)
		return -EINVAL;

	ret = mm_iommu_ua_to_hpa(mem, tce, phpa);
	if (ret)
		return -EINVAL;

	*pmem = mem;

	return 0;
}
static void tce_iommu_unuse_page_v2(struct tce_container *container,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	int ret;
	unsigned long hpa = 0;
	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

	if (!pua)
		return;

	ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl),
			&hpa, &mem);
	if (ret)
		pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
				__func__, *pua, entry, ret);
	if (mem)
		mm_iommu_mapped_dec(mem);

	*pua = 0;
}
static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	unsigned long oldhpa;
	long ret;
	enum dma_data_direction direction;

	for ( ; pages; --pages, ++entry) {
		direction = DMA_NONE;
		oldhpa = 0;
		ret = iommu_tce_xchg(tbl, entry, &oldhpa, &direction);
		if (ret)
			continue;

		if (direction == DMA_NONE)
			continue;

		if (container->v2) {
			tce_iommu_unuse_page_v2(container, tbl, entry);
			continue;
		}

		tce_iommu_unuse_page(container, oldhpa);
	}

	return 0;
}
static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
{
	struct page *page = NULL;
	enum dma_data_direction direction = iommu_tce_direction(tce);

	if (get_user_pages_fast(tce & PAGE_MASK, 1,
			direction != DMA_TO_DEVICE, &page) != 1)
		return -EFAULT;

	*hpa = __pa((unsigned long) page_address(page));

	return 0;
}
static long tce_iommu_build(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	struct page *page;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;

		ret = tce_iommu_use_page(tce, &hpa);
		if (ret)
			break;

		page = pfn_to_page(hpa >> PAGE_SHIFT);
		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		hpa |= offset;
		dirtmp = direction;
		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
		if (ret) {
			tce_iommu_unuse_page(container, hpa);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page(container, hpa);

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}
static long tce_iommu_build_v2(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	struct page *page;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	if (!tbl->it_userspace) {
		ret = tce_iommu_userspace_view_alloc(tbl, container->mm);
		if (ret)
			return ret;
	}

	for (i = 0; i < pages; ++i) {
		struct mm_iommu_table_group_mem_t *mem = NULL;
		unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl,
				entry + i);

		ret = tce_iommu_prereg_ua_to_hpa(container,
				tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem);
		if (ret)
			break;

		page = pfn_to_page(hpa >> PAGE_SHIFT);
		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		/* Preserve offset within IOMMU page */
		hpa |= tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
		dirtmp = direction;

		/* The registered region is being unregistered */
		if (mm_iommu_mapped_inc(mem))
			break;

		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
		if (ret) {
			/* dirtmp cannot be DMA_NONE here */
			tce_iommu_unuse_page_v2(container, tbl, entry + i);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page_v2(container, tbl, entry + i);

		*pua = tce;

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}
static long tce_iommu_create_table(struct tce_container *container,
		struct iommu_table_group *table_group,
		int num,
		__u32 page_shift,
		__u64 window_size,
		__u32 levels,
		struct iommu_table **ptbl)
{
	long ret, table_size;

	table_size = table_group->ops->get_table_size(page_shift, window_size,
			levels);
	if (!table_size)
		return -EINVAL;

	ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	ret = table_group->ops->create_table(table_group, num,
			page_shift, window_size, levels, ptbl);

	WARN_ON(!ret && !(*ptbl)->it_ops->free);
	WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size));

	return ret;
}
static void tce_iommu_free_table(struct tce_container *container,
		struct iommu_table *tbl)
{
	unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;

	tce_iommu_userspace_view_free(tbl, container->mm);
	tbl->it_ops->free(tbl);
	decrement_locked_vm(container->mm, pages);
}
static long tce_iommu_create_window(struct tce_container *container,
		__u32 page_shift, __u64 window_size, __u32 levels,
		__u64 *start_addr)
{
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;
	struct iommu_table *tbl = NULL;
	long ret, num;

	num = tce_iommu_find_free_table(container);
	if (num < 0)
		return num;

	/* Get the first group for ops::create_table */
	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -EFAULT;

	if (!(table_group->pgsizes & (1ULL << page_shift)))
		return -EINVAL;

	if (!table_group->ops->set_window || !table_group->ops->unset_window ||
			!table_group->ops->get_table_size ||
			!table_group->ops->create_table)
		return -EPERM;

	/* Create TCE table */
	ret = tce_iommu_create_table(container, table_group, num,
			page_shift, window_size, levels, &tbl);
	if (ret)
		return ret;

	BUG_ON(!tbl->it_ops->free);

	/*
	 * Program the table to every group.
	 * Groups have been tested for compatibility at the attach time.
	 */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		ret = table_group->ops->set_window(table_group, num, tbl);
		if (ret)
			goto unset_exit;
	}

	container->tables[num] = tbl;

	/* Return start address assigned by platform in create_table() */
	*start_addr = tbl->it_offset << tbl->it_page_shift;

	return 0;

unset_exit:
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);
		table_group->ops->unset_window(table_group, num);
	}
	tce_iommu_free_table(container, tbl);

	return ret;
}
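/*
 * Illustrative userspace request for an additional window served by the
 * function above (a sketch; the 64K IOMMU page size, 1GB window size and
 * single level are arbitrary example values, and the removal shows the
 * matching teardown):
 *
 *	struct vfio_iommu_spapr_tce_create create = {
 *		.argsz = sizeof(create),
 *		.page_shift = 16,
 *		.window_size = 1ULL << 30,
 *		.levels = 1,
 *	};
 *	ioctl(container, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
 *
 *	struct vfio_iommu_spapr_tce_remove remove = {
 *		.argsz = sizeof(remove),
 *		.start_addr = create.start_addr,
 *	};
 *	ioctl(container, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
 */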
static long tce_iommu_remove_window(struct tce_container *container,
		__u64 start_addr)
{
	struct iommu_table_group *table_group = NULL;
	struct iommu_table *tbl;
	struct tce_iommu_group *tcegrp;
	int num;

	num = tce_iommu_find_table(container, start_addr, &tbl);
	if (num < 0)
		return -EINVAL;

	BUG_ON(!tbl->it_size);

	/* Detach groups from IOMMUs */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		/*
		 * SPAPR TCE IOMMU exposes the default DMA window to
		 * the guest via dma32_window_start/size of
		 * VFIO_IOMMU_SPAPR_TCE_GET_INFO. Some platforms allow
		 * the userspace to remove this window, some do not so
		 * here we check for the platform capability.
		 */
		if (!table_group->ops || !table_group->ops->unset_window)
			return -EPERM;

		table_group->ops->unset_window(table_group, num);
	}

	/* Free table */
	tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
	tce_iommu_free_table(container, tbl);
	container->tables[num] = NULL;

	return 0;
}
static long tce_iommu_create_default_window(struct tce_container *container)
{
	long ret;
	__u64 start_addr = 0;
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;

	if (!container->def_window_pending)
		return 0;

	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
			table_group->tce32_size, 1, &start_addr);
	WARN_ON_ONCE(!ret && start_addr);

	if (!ret)
		container->def_window_pending = false;

	return ret;
}
static long tce_iommu_ioctl(void *iommu_data,
		unsigned int cmd, unsigned long arg)
{
	struct tce_container *container = iommu_data;
	unsigned long minsz, ddwsz;
	long ret;

	switch (cmd) {
	case VFIO_CHECK_EXTENSION:
		switch (arg) {
		case VFIO_SPAPR_TCE_IOMMU:
		case VFIO_SPAPR_TCE_v2_IOMMU:
			ret = 1;
			break;
		default:
			ret = vfio_spapr_iommu_eeh_ioctl(NULL, cmd, arg);
			break;
		}

		return (ret < 0) ? 0 : ret;
	}

	/*
	 * Sanity check to prevent one userspace from manipulating
	 * another userspace mm.
	 */
	BUG_ON(!container);
	if (container->mm && container->mm != current->mm)
		return -EPERM;

	switch (cmd) {
	case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
		struct vfio_iommu_spapr_tce_info info;
		struct tce_iommu_group *tcegrp;
		struct iommu_table_group *table_group;

		if (!tce_groups_attached(container))
			return -ENXIO;

		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		if (!table_group)
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_info,
				dma32_window_size);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.dma32_window_start = table_group->tce32_start;
		info.dma32_window_size = table_group->tce32_size;
		info.flags = 0;
		memset(&info.ddw, 0, sizeof(info.ddw));

		if (table_group->max_dynamic_windows_supported &&
				container->v2) {
			info.flags |= VFIO_IOMMU_SPAPR_INFO_DDW;
			info.ddw.pgsizes = table_group->pgsizes;
			info.ddw.max_dynamic_windows_supported =
				table_group->max_dynamic_windows_supported;
			info.ddw.levels = table_group->max_levels;
		}

		ddwsz = offsetofend(struct vfio_iommu_spapr_tce_info, ddw);

		if (info.argsz >= ddwsz)
			minsz = ddwsz;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_IOMMU_MAP_DMA: {
		struct vfio_iommu_type1_dma_map param;
		struct iommu_table *tbl = NULL;
		long num;
		enum dma_data_direction direction;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		if (param.flags & ~(VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE))
			return -EINVAL;

		ret = tce_iommu_create_default_window(container);
		if (ret)
			return ret;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
				(param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
			return -EINVAL;

		/* iova is checked by the IOMMU API */
		if (param.flags & VFIO_DMA_MAP_FLAG_READ) {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_BIDIRECTIONAL;
			else
				direction = DMA_TO_DEVICE;
		} else {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_FROM_DEVICE;
			else
				return -EINVAL;
		}

		ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr);
		if (ret)
			return ret;

		if (container->v2)
			ret = tce_iommu_build_v2(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);
		else
			ret = tce_iommu_build(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);

		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_UNMAP_DMA: {
		struct vfio_iommu_type1_dma_unmap param;
		struct iommu_table *tbl = NULL;
		long num;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		ret = tce_iommu_create_default_window(container);
		if (ret)
			return ret;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if (param.size & ~IOMMU_PAGE_MASK(tbl))
			return -EINVAL;

		ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
				param.size >> tbl->it_page_shift);
		if (ret)
			return ret;

		ret = tce_iommu_clear(container, tbl,
				param.iova >> tbl->it_page_shift,
				param.size >> tbl->it_page_shift);
		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_REGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_register_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		if (!container->mm)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_unregister_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_ENABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		ret = tce_iommu_enable(container);
		mutex_unlock(&container->lock);
		return ret;

	case VFIO_IOMMU_DISABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		tce_iommu_disable(container);
		mutex_unlock(&container->lock);
		return 0;

	case VFIO_EEH_PE_OP: {
		struct tce_iommu_group *tcegrp;

		ret = 0;
		list_for_each_entry(tcegrp, &container->group_list, next) {
			ret = vfio_spapr_iommu_eeh_ioctl(tcegrp->grp,
					cmd, arg);
			if (ret)
				return ret;
		}
		return ret;
	}
	case VFIO_IOMMU_SPAPR_TCE_CREATE: {
		struct vfio_iommu_spapr_tce_create create;

		if (!container->v2)
			break;

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_create,
				start_addr);

		if (copy_from_user(&create, (void __user *)arg, minsz))
			return -EFAULT;

		if (create.argsz < minsz)
			return -EINVAL;

		if (create.flags)
			return -EINVAL;

		mutex_lock(&container->lock);

		ret = tce_iommu_create_default_window(container);
		if (!ret)
			ret = tce_iommu_create_window(container,
					create.page_shift,
					create.window_size, create.levels,
					&create.start_addr);

		mutex_unlock(&container->lock);

		if (!ret && copy_to_user((void __user *)arg, &create, minsz))
			ret = -EFAULT;

		return ret;
	}
	case VFIO_IOMMU_SPAPR_TCE_REMOVE: {
		struct vfio_iommu_spapr_tce_remove remove;

		if (!container->v2)
			break;

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_remove,
				start_addr);

		if (copy_from_user(&remove, (void __user *)arg, minsz))
			return -EFAULT;

		if (remove.argsz < minsz)
			return -EINVAL;

		if (remove.flags)
			return -EINVAL;

		if (container->def_window_pending && !remove.start_addr) {
			container->def_window_pending = false;
			return 0;
		}

		mutex_lock(&container->lock);

		ret = tce_iommu_remove_window(container, remove.start_addr);

		mutex_unlock(&container->lock);

		return ret;
	}
	}

	return -ENOTTY;
}
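/*
 * Note the v2 ordering implied by the handlers above (a sketch; structure
 * initialization and error handling are omitted): the default window is
 * created lazily by the first VFIO_IOMMU_MAP_DMA, VFIO_IOMMU_UNMAP_DMA or
 * VFIO_IOMMU_SPAPR_TCE_CREATE, and mapping only succeeds for memory that
 * was preregistered first, which is also what enables a v2 container:
 *
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_v2_IOMMU);
 *	ioctl(container, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
 *	ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
 */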
static void tce_iommu_release_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_userspace_view_free(tbl, container->mm);
		if (tbl->it_map)
			iommu_release_ownership(tbl);

		container->tables[i] = NULL;
	}
}
static int tce_iommu_take_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i, j, rc = 0;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = table_group->tables[i];

		if (!tbl || !tbl->it_map)
			continue;

		rc = iommu_take_ownership(tbl);
		if (rc) {
			for (j = 0; j < i; ++j)
				iommu_release_ownership(
						table_group->tables[j]);

			return rc;
		}
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		container->tables[i] = table_group->tables[i];

	return 0;
}
static void tce_iommu_release_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i;

	if (!table_group->ops->unset_window) {
		WARN_ON_ONCE(1);
		return;
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);
}
static long tce_iommu_take_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i, ret = 0;

	if (!table_group->ops->create_table || !table_group->ops->set_window ||
			!table_group->ops->release_ownership) {
		WARN_ON_ONCE(1);
		return -EFAULT;
	}

	table_group->ops->take_ownership(table_group);

	/* Set all windows to the new group */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		ret = table_group->ops->set_window(table_group, i, tbl);
		if (ret)
			goto release_exit;
	}

	return 0;

release_exit:
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);

	return ret;
}
static int tce_iommu_attach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	int ret;
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp = NULL;

	mutex_lock(&container->lock);

	/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
			iommu_group_id(iommu_group), iommu_group); */
	table_group = iommu_group_get_iommudata(iommu_group);
	if (!table_group) {
		ret = -ENODEV;
		goto unlock_exit;
	}

	if (tce_groups_attached(container) && (!table_group->ops ||
			!table_group->ops->take_ownership ||
			!table_group->ops->release_ownership)) {
		ret = -EBUSY;
		goto unlock_exit;
	}

	/* Check if new group has the same iommu_ops (i.e. compatible) */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		struct iommu_table_group *table_group_tmp;

		if (tcegrp->grp == iommu_group) {
			pr_warn("tce_vfio: Group %d is already attached\n",
					iommu_group_id(iommu_group));
			ret = -EBUSY;
			goto unlock_exit;
		}
		table_group_tmp = iommu_group_get_iommudata(tcegrp->grp);
		if (table_group_tmp->ops->create_table !=
				table_group->ops->create_table) {
			pr_warn("tce_vfio: Group %d is incompatible with group %d\n",
					iommu_group_id(iommu_group),
					iommu_group_id(tcegrp->grp));
			ret = -EPERM;
			goto unlock_exit;
		}
	}

	tcegrp = kzalloc(sizeof(*tcegrp), GFP_KERNEL);
	if (!tcegrp) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	if (!table_group->ops || !table_group->ops->take_ownership ||
			!table_group->ops->release_ownership) {
		if (container->v2) {
			ret = -EPERM;
			goto unlock_exit;
		}
		ret = tce_iommu_take_ownership(container, table_group);
	} else {
		if (!container->v2) {
			ret = -EPERM;
			goto unlock_exit;
		}
		ret = tce_iommu_take_ownership_ddw(container, table_group);
		if (!tce_groups_attached(container) && !container->tables[0])
			container->def_window_pending = true;
	}

	if (!ret) {
		tcegrp->grp = iommu_group;
		list_add(&tcegrp->next, &container->group_list);
	}

unlock_exit:
	if (ret && tcegrp)
		kfree(tcegrp);

	mutex_unlock(&container->lock);

	return ret;
}
static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	bool found = false;
	struct tce_iommu_group *tcegrp;

	mutex_lock(&container->lock);

	list_for_each_entry(tcegrp, &container->group_list, next) {
		if (tcegrp->grp == iommu_group) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_warn("tce_vfio: detaching unattached group #%u\n",
				iommu_group_id(iommu_group));
		goto unlock_exit;
	}

	list_del(&tcegrp->next);
	kfree(tcegrp);

	table_group = iommu_group_get_iommudata(iommu_group);
	BUG_ON(!table_group);

	if (!table_group->ops || !table_group->ops->release_ownership)
		tce_iommu_release_ownership(container, table_group);
	else
		tce_iommu_release_ownership_ddw(container, table_group);

unlock_exit:
	mutex_unlock(&container->lock);
}
const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
	.name		= "iommu-vfio-powerpc",
	.owner		= THIS_MODULE,
	.open		= tce_iommu_open,
	.release	= tce_iommu_release,
	.ioctl		= tce_iommu_ioctl,
	.attach_group	= tce_iommu_attach_group,
	.detach_group	= tce_iommu_detach_group,
};
static int __init tce_iommu_init(void)
{
	return vfio_register_iommu_driver(&tce_iommu_driver_ops);
}

static void __exit tce_iommu_cleanup(void)
{
	vfio_unregister_iommu_driver(&tce_iommu_driver_ops);
}

module_init(tce_iommu_init);
module_exit(tce_iommu_cleanup);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);