/*
 * VFIO: IOMMU DMA mapping support for TCE on POWER
 *
 * Copyright (C) 2013 IBM Corp.  All rights reserved.
 *     Author: Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio_iommu_type1.c:
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/vfio.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>

#include <asm/iommu.h>
#include <asm/mmu_context.h>

#define DRIVER_VERSION	"0.1"
#define DRIVER_AUTHOR	"aik@ozlabs.ru"
#define DRIVER_DESC	"VFIO IOMMU SPAPR TCE"

static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group);

static long try_increment_locked_vm(struct mm_struct *mm, long npages)
{
	long ret = 0, locked, lock_limit;

	if (WARN_ON_ONCE(!mm))
		return -EPERM;

	if (!npages)
		return 0;

	down_write(&mm->mmap_sem);
	locked = mm->locked_vm + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK))
		ret = -ENOMEM;
	else
		mm->locked_vm += npages;

	pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK),
			ret ? " - exceeded" : "");

	up_write(&mm->mmap_sem);

	return ret;
}

static void decrement_locked_vm(struct mm_struct *mm, long npages)
{
	if (!mm || !npages)
		return;

	down_write(&mm->mmap_sem);
	if (WARN_ON_ONCE(npages > mm->locked_vm))
		npages = mm->locked_vm;
	mm->locked_vm -= npages;
	pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK));
	up_write(&mm->mmap_sem);
}

/*
 * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
 *
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */

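/*
 * Illustrative sketch (not part of the driver): a minimal userspace
 * sequence against the v1 (VFIO_SPAPR_TCE_IOMMU) backend, assuming the
 * container and group file descriptors were already opened from
 * /dev/vfio/vfio and /dev/vfio/<group>; "buf"/"bufsize" are placeholders
 * for a page-aligned user buffer:
 *
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
 *
 *	struct vfio_iommu_spapr_tce_info info = { .argsz = sizeof(info) };
 *	ioctl(container, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
 *	ioctl(container, VFIO_IOMMU_ENABLE);	(accounts the whole DMA window)
 *
 *	struct vfio_iommu_type1_dma_map map = {
 *		.argsz = sizeof(map),
 *		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *		.vaddr = (__u64)buf,
 *		.iova  = info.dma32_window_start,
 *		.size  = bufsize,	(multiple of the IOMMU page size)
 *	};
 *	ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
 */
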
struct tce_iommu_group {
	struct list_head next;
	struct iommu_group *grp;
};

/*
 * A container needs to remember which preregistered region it has
 * referenced to do proper cleanup at the userspace process exit.
 */
struct tce_iommu_prereg {
	struct list_head next;
	struct mm_iommu_table_group_mem_t *mem;
};

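/*
 * Illustrative sketch (not part of the driver): with the v2
 * (VFIO_SPAPR_TCE_v2_IOMMU) backend, userspace preregisters its memory
 * once so that map/unmap does not pin pages in the hot path; "qva" and
 * "qsize" are placeholders for a page-aligned region:
 *
 *	struct vfio_iommu_spapr_register_memory reg = {
 *		.argsz = sizeof(reg),
 *		.vaddr = qva,
 *		.size  = qsize,
 *	};
 *	ioctl(container, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
 *	...
 *	ioctl(container, VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY, &reg);
 */
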
/*
 * The container descriptor supports only a single group per container.
 * Required by the API as the container is not supplied with the IOMMU group
 * at the moment of initialization.
 */
struct tce_container {
	struct mutex lock;
	bool enabled;
	bool v2;
	bool def_window_pending;
	unsigned long locked_pages;
	struct mm_struct *mm;
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
	struct list_head group_list;
	struct list_head prereg_list;
};

static long tce_iommu_mm_set(struct tce_container *container)
{
	if (container->mm) {
		if (container->mm == current->mm)
			return 0;
		return -EPERM;
	}
	BUG_ON(!current->mm);
	container->mm = current->mm;
	atomic_inc(&container->mm->mm_count);

	return 0;
}

static long tce_iommu_prereg_free(struct tce_container *container,
		struct tce_iommu_prereg *tcemem)
{
	long ret;

	ret = mm_iommu_put(container->mm, tcemem->mem);
	if (ret)
		return ret;

	list_del(&tcemem->next);
	kfree(tcemem);

	return 0;
}

static long tce_iommu_unregister_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	struct mm_iommu_table_group_mem_t *mem;
	struct tce_iommu_prereg *tcemem;
	bool found = false;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
		return -EINVAL;

	mem = mm_iommu_find(container->mm, vaddr, size >> PAGE_SHIFT);
	if (!mem)
		return -ENOENT;

	list_for_each_entry(tcemem, &container->prereg_list, next) {
		if (tcemem->mem == mem) {
			found = true;
			break;
		}
	}

	if (!found)
		return -ENOENT;

	return tce_iommu_prereg_free(container, tcemem);
}

static long tce_iommu_register_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem = NULL;
	struct tce_iommu_prereg *tcemem;
	unsigned long entries = size >> PAGE_SHIFT;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
			((vaddr + size) < vaddr))
		return -EINVAL;

	mem = mm_iommu_find(container->mm, vaddr, entries);
	if (mem) {
		list_for_each_entry(tcemem, &container->prereg_list, next) {
			if (tcemem->mem == mem)
				return -EBUSY;
		}
	}

	ret = mm_iommu_get(container->mm, vaddr, entries, &mem);
	if (ret)
		return ret;

	tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
	if (!tcemem) {
		mm_iommu_put(container->mm, mem);
		return -ENOMEM;
	}

	tcemem->mem = mem;
	list_add(&tcemem->next, &container->prereg_list);

	container->enabled = true;

	return 0;
}

static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl,
		struct mm_struct *mm)
{
	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
			tbl->it_size, PAGE_SIZE);
	unsigned long *uas;
	long ret;

	BUG_ON(tbl->it_userspace);

	ret = try_increment_locked_vm(mm, cb >> PAGE_SHIFT);
	if (ret)
		return ret;

	uas = vzalloc(cb);
	if (uas == NULL) {
		decrement_locked_vm(mm, cb >> PAGE_SHIFT);
		return -ENOMEM;
	}
	tbl->it_userspace = uas;

	return 0;
}

static void tce_iommu_userspace_view_free(struct iommu_table *tbl,
		struct mm_struct *mm)
{
	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
			tbl->it_size, PAGE_SIZE);

	if (!tbl->it_userspace)
		return;

	vfree(tbl->it_userspace);
	tbl->it_userspace = NULL;
	decrement_locked_vm(mm, cb >> PAGE_SHIFT);
}

static bool tce_page_is_contained(struct page *page, unsigned page_shift)
{
	/*
	 * Check that the TCE table granularity is not bigger than the size of
	 * a page we just found. Otherwise the hardware can get access to
	 * a bigger memory chunk than it should.
	 */
	return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift;
}

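/*
 * A worked example of the check above: with 64K system pages
 * (PAGE_SHIFT = 16), a non-compound page satisfies a 64K TCE page
 * (16 + 0 >= 16) but not a 16M TCE page (16 + 0 < 24), whereas a 16M
 * huge page (compound order 8 in 64K units) does (16 + 8 >= 24).
 */
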
static inline bool tce_groups_attached(struct tce_container *container)
{
	return !list_empty(&container->group_list);
}

static long tce_iommu_find_table(struct tce_container *container,
		phys_addr_t ioba, struct iommu_table **ptbl)
{
	long i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (tbl) {
			unsigned long entry = ioba >> tbl->it_page_shift;
			unsigned long start = tbl->it_offset;
			unsigned long end = start + tbl->it_size;

			if ((start <= entry) && (entry < end)) {
				*ptbl = tbl;
				return i;
			}
		}
	}

	return -1;
}

static int tce_iommu_find_free_table(struct tce_container *container)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		if (!container->tables[i])
			return i;
	}

	return -ENOSPC;
}

static int tce_iommu_enable(struct tce_container *container)
{
	int ret = 0;
	unsigned long locked;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp;

	if (container->enabled)
		return -EBUSY;

	/*
	 * When userspace pages are mapped into the IOMMU, they are effectively
	 * locked memory, so, theoretically, we need to update the accounting
	 * of locked pages on each map and unmap. For powerpc, the map/unmap
	 * paths can be very hot, though, and the accounting would kill
	 * performance, especially since it would be difficult or impossible
	 * to handle the accounting in real mode only.
	 *
	 * To address that, rather than precisely accounting every page, we
	 * instead account for a worst case on locked memory when the iommu is
	 * enabled and disabled. The worst case upper bound on locked memory
	 * is the size of the whole iommu window, which is usually relatively
	 * small (compared to total memory sizes) on POWER hardware.
	 *
	 * Also we don't have a nice way to fail on H_PUT_TCE due to ulimits,
	 * which would effectively kill the guest at random points; it is much
	 * better to enforce the limit based on the maximum that the guest can map.
	 *
	 * Unfortunately at the moment it counts whole tables, no matter how
	 * much memory the guest has. I.e. for a 4GB guest and 4 IOMMU groups,
	 * each with a 2GB DMA window, 8GB will be counted here. The reason for
	 * this is that we cannot tell here the amount of RAM used by the guest
	 * as this information is only available from KVM and VFIO is
	 * KVM agnostic.
	 *
	 * So we do not allow enabling a container without a group attached
	 * as there is no way to know how much we should increment
	 * the locked_vm counter.
	 */
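	/*
	 * As a worked example of the accounting below: a 2GB 32-bit DMA
	 * window on a host with 64K pages charges
	 * 0x80000000 >> 16 = 32768 pages to locked_vm for this container,
	 * regardless of how much of the window the guest actually maps.
	 */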
	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	if (!table_group->tce32_size)
		return -EPERM;

	ret = tce_iommu_mm_set(container);
	if (ret)
		return ret;

	locked = table_group->tce32_size >> PAGE_SHIFT;
	ret = try_increment_locked_vm(container->mm, locked);
	if (ret)
		return ret;

	container->locked_pages = locked;

	container->enabled = true;

	return ret;
}

static void tce_iommu_disable(struct tce_container *container)
{
	if (!container->enabled)
		return;

	container->enabled = false;

	BUG_ON(!container->mm);
	decrement_locked_vm(container->mm, container->locked_pages);
}

static void *tce_iommu_open(unsigned long arg)
{
	struct tce_container *container;

	if ((arg != VFIO_SPAPR_TCE_IOMMU) && (arg != VFIO_SPAPR_TCE_v2_IOMMU)) {
		pr_err("tce_vfio: Wrong IOMMU type\n");
		return ERR_PTR(-EINVAL);
	}

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	mutex_init(&container->lock);
	INIT_LIST_HEAD_RCU(&container->group_list);
	INIT_LIST_HEAD_RCU(&container->prereg_list);

	container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;

	return container;
}

static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages);
static void tce_iommu_free_table(struct tce_container *container,
		struct iommu_table *tbl);

static void tce_iommu_release(void *iommu_data)
{
	struct tce_container *container = iommu_data;
	struct tce_iommu_group *tcegrp;
	struct tce_iommu_prereg *tcemem, *tmtmp;
	long i;

	while (tce_groups_attached(container)) {
		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		tce_iommu_detach_group(iommu_data, tcegrp->grp);
	}

	/*
	 * If VFIO created a table, it was not disposed
	 * by tce_iommu_detach_group() so do it now.
	 */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_free_table(container, tbl);
	}

	list_for_each_entry_safe(tcemem, tmtmp, &container->prereg_list, next)
		WARN_ON(tce_iommu_prereg_free(container, tcemem));

	tce_iommu_disable(container);
	if (container->mm)
		mmdrop(container->mm);
	mutex_destroy(&container->lock);

	kfree(container);
}

static void tce_iommu_unuse_page(struct tce_container *container,
		unsigned long hpa)
{
	struct page *page;

	page = pfn_to_page(hpa >> PAGE_SHIFT);
	put_page(page);
}

static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
		unsigned long tce, unsigned long shift,
		unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem;

	mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift);
	if (!mem)
		return -EINVAL;

	ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa);
	if (ret)
		return -EINVAL;

	*pmem = mem;

	return 0;
}

static void tce_iommu_unuse_page_v2(struct tce_container *container,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	int ret;
	unsigned long hpa = 0;
	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

	if (!pua)
		return;

	ret = tce_iommu_prereg_ua_to_hpa(container, *pua, tbl->it_page_shift,
			&hpa, &mem);
	if (ret)
		pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
				__func__, *pua, entry, ret);
	if (mem)
		mm_iommu_mapped_dec(mem);

	*pua = 0;
}

static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	unsigned long oldhpa;
	long ret;
	enum dma_data_direction direction;

	for ( ; pages; --pages, ++entry) {
		direction = DMA_NONE;
		oldhpa = 0;
		ret = iommu_tce_xchg(tbl, entry, &oldhpa, &direction);
		if (ret)
			continue;

		if (direction == DMA_NONE)
			continue;

		if (container->v2) {
			tce_iommu_unuse_page_v2(container, tbl, entry);
			continue;
		}

		tce_iommu_unuse_page(container, oldhpa);
	}

	return 0;
}

static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
{
	struct page *page = NULL;
	enum dma_data_direction direction = iommu_tce_direction(tce);

	if (get_user_pages_fast(tce & PAGE_MASK, 1,
			direction != DMA_TO_DEVICE, &page) != 1)
		return -EFAULT;

	*hpa = __pa((unsigned long) page_address(page));

	return 0;
}

static long tce_iommu_build(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	struct page *page;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;

		ret = tce_iommu_use_page(tce, &hpa);
		if (ret)
			break;

		page = pfn_to_page(hpa >> PAGE_SHIFT);
		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		hpa |= offset;
		dirtmp = direction;
		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
		if (ret) {
			tce_iommu_unuse_page(container, hpa);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page(container, hpa);

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}

static long tce_iommu_build_v2(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	struct page *page;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	if (!tbl->it_userspace) {
		ret = tce_iommu_userspace_view_alloc(tbl, container->mm);
		if (ret)
			return ret;
	}

	for (i = 0; i < pages; ++i) {
		struct mm_iommu_table_group_mem_t *mem = NULL;
		unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl,
				entry + i);

		ret = tce_iommu_prereg_ua_to_hpa(container,
				tce, tbl->it_page_shift, &hpa, &mem);
		if (ret)
			break;

		page = pfn_to_page(hpa >> PAGE_SHIFT);
		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		/* Preserve offset within IOMMU page */
		hpa |= tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
		dirtmp = direction;

		/* The registered region is being unregistered */
		if (mm_iommu_mapped_inc(mem))
			break;

		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
		if (ret) {
			/* dirtmp cannot be DMA_NONE here */
			tce_iommu_unuse_page_v2(container, tbl, entry + i);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page_v2(container, tbl, entry + i);

		*pua = tce;

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}

static long tce_iommu_create_table(struct tce_container *container,
		struct iommu_table_group *table_group,
		int num,
		__u32 page_shift,
		__u64 window_size,
		__u32 levels,
		struct iommu_table **ptbl)
{
	long ret, table_size;

	table_size = table_group->ops->get_table_size(page_shift, window_size,
			levels);
	if (!table_size)
		return -EINVAL;

	ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	ret = table_group->ops->create_table(table_group, num,
			page_shift, window_size, levels, ptbl);

	WARN_ON(!ret && !(*ptbl)->it_ops->free);
	WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size));

	return ret;
}

static void tce_iommu_free_table(struct tce_container *container,
		struct iommu_table *tbl)
{
	unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;

	tce_iommu_userspace_view_free(tbl, container->mm);
	iommu_tce_table_put(tbl);
	decrement_locked_vm(container->mm, pages);
}

static long tce_iommu_create_window(struct tce_container *container,
		__u32 page_shift, __u64 window_size, __u32 levels,
		__u64 *start_addr)
{
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;
	struct iommu_table *tbl = NULL;
	long ret, num;

	num = tce_iommu_find_free_table(container);
	if (num < 0)
		return num;

	/* Get the first group for ops::create_table */
	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -EFAULT;

	if (!(table_group->pgsizes & (1ULL << page_shift)))
		return -EINVAL;

	if (!table_group->ops->set_window || !table_group->ops->unset_window ||
			!table_group->ops->get_table_size ||
			!table_group->ops->create_table)
		return -EPERM;

	/* Create TCE table */
	ret = tce_iommu_create_table(container, table_group, num,
			page_shift, window_size, levels, &tbl);
	if (ret)
		return ret;

	BUG_ON(!tbl->it_ops->free);

	/*
	 * Program the table to every group.
	 * Groups have been tested for compatibility at the attach time.
	 */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		ret = table_group->ops->set_window(table_group, num, tbl);
		if (ret)
			goto unset_exit;
	}

	container->tables[num] = tbl;

	/* Return start address assigned by platform in create_table() */
	*start_addr = tbl->it_offset << tbl->it_page_shift;

	return 0;

unset_exit:
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);
		table_group->ops->unset_window(table_group, num);
	}
	tce_iommu_free_table(container, tbl);

	return ret;
}

static long tce_iommu_remove_window(struct tce_container *container,
		__u64 start_addr)
{
	struct iommu_table_group *table_group = NULL;
	struct iommu_table *tbl;
	struct tce_iommu_group *tcegrp;
	int num;

	num = tce_iommu_find_table(container, start_addr, &tbl);
	if (num < 0)
		return -EINVAL;

	BUG_ON(!tbl->it_size);

	/* Detach groups from IOMMUs */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		/*
		 * SPAPR TCE IOMMU exposes the default DMA window to
		 * the guest via dma32_window_start/size of
		 * VFIO_IOMMU_SPAPR_TCE_GET_INFO. Some platforms allow
		 * the userspace to remove this window, some do not, so
		 * here we check for the platform capability.
		 */
		if (!table_group->ops || !table_group->ops->unset_window)
			return -EPERM;

		table_group->ops->unset_window(table_group, num);
	}

	/* Free table */
	tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
	tce_iommu_free_table(container, tbl);
	container->tables[num] = NULL;

	return 0;
}

static long tce_iommu_create_default_window(struct tce_container *container)
{
	long ret;
	__u64 start_addr = 0;
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;

	if (!container->def_window_pending)
		return 0;

	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
			table_group->tce32_size, 1, &start_addr);
	WARN_ON_ONCE(!ret && start_addr);

	if (!ret)
		container->def_window_pending = false;

	return ret;
}

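/*
 * Illustrative sketch (not part of the driver): with the v2 backend,
 * userspace can replace the default window with a dynamic DMA window.
 * "ram_size" below is a placeholder for the desired window size:
 *
 *	struct vfio_iommu_spapr_tce_create create = {
 *		.argsz = sizeof(create),
 *		.page_shift = 16,
 *		.window_size = ram_size,
 *		.levels = 1,
 *	};
 *	ioctl(container, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
 *	(on success, create.start_addr holds the new window's bus address)
 *
 *	struct vfio_iommu_spapr_tce_remove remove = {
 *		.argsz = sizeof(remove),
 *		.start_addr = create.start_addr,
 *	};
 *	ioctl(container, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
 */
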
static long tce_iommu_ioctl(void *iommu_data,
				 unsigned int cmd, unsigned long arg)
{
	struct tce_container *container = iommu_data;
	unsigned long minsz, ddwsz;
	long ret;

	switch (cmd) {
	case VFIO_CHECK_EXTENSION:
		switch (arg) {
		case VFIO_SPAPR_TCE_IOMMU:
		case VFIO_SPAPR_TCE_v2_IOMMU:
			ret = 1;
			break;
		default:
			ret = vfio_spapr_iommu_eeh_ioctl(NULL, cmd, arg);
			break;
		}

		return (ret < 0) ? 0 : ret;
	}

	/*
	 * Sanity check to prevent one userspace from manipulating
	 * another userspace mm.
	 */
	BUG_ON(!container);
	if (container->mm && container->mm != current->mm)
		return -EPERM;

	switch (cmd) {
	case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
		struct vfio_iommu_spapr_tce_info info;
		struct tce_iommu_group *tcegrp;
		struct iommu_table_group *table_group;

		if (!tce_groups_attached(container))
			return -ENXIO;

		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		if (!table_group)
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_info,
				dma32_window_size);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.dma32_window_start = table_group->tce32_start;
		info.dma32_window_size = table_group->tce32_size;
		info.flags = 0;
		memset(&info.ddw, 0, sizeof(info.ddw));

		if (table_group->max_dynamic_windows_supported &&
				container->v2) {
			info.flags |= VFIO_IOMMU_SPAPR_INFO_DDW;
			info.ddw.pgsizes = table_group->pgsizes;
			info.ddw.max_dynamic_windows_supported =
				table_group->max_dynamic_windows_supported;
			info.ddw.levels = table_group->max_levels;
		}

		ddwsz = offsetofend(struct vfio_iommu_spapr_tce_info, ddw);

		if (info.argsz >= ddwsz)
			minsz = ddwsz;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_IOMMU_MAP_DMA: {
		struct vfio_iommu_type1_dma_map param;
		struct iommu_table *tbl = NULL;
		long num;
		enum dma_data_direction direction;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		if (param.flags & ~(VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE))
			return -EINVAL;

		ret = tce_iommu_create_default_window(container);
		if (ret)
			return ret;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
				(param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
			return -EINVAL;

		/* iova is checked by the IOMMU API */
		if (param.flags & VFIO_DMA_MAP_FLAG_READ) {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_BIDIRECTIONAL;
			else
				direction = DMA_TO_DEVICE;
		} else {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_FROM_DEVICE;
			else
				return -EINVAL;
		}

		ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr);
		if (ret)
			return ret;

		if (container->v2)
			ret = tce_iommu_build_v2(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);
		else
			ret = tce_iommu_build(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);

		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_UNMAP_DMA: {
		struct vfio_iommu_type1_dma_unmap param;
		struct iommu_table *tbl = NULL;
		long num;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		ret = tce_iommu_create_default_window(container);
		if (ret)
			return ret;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if (param.size & ~IOMMU_PAGE_MASK(tbl))
			return -EINVAL;

		ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
				param.size >> tbl->it_page_shift);
		if (ret)
			return ret;

		ret = tce_iommu_clear(container, tbl,
				param.iova >> tbl->it_page_shift,
				param.size >> tbl->it_page_shift);
		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_REGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_register_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		if (!container->mm)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_unregister_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_ENABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		ret = tce_iommu_enable(container);
		mutex_unlock(&container->lock);
		return ret;

	case VFIO_IOMMU_DISABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		tce_iommu_disable(container);
		mutex_unlock(&container->lock);
		return 0;

	case VFIO_EEH_PE_OP: {
		struct tce_iommu_group *tcegrp;

		ret = 0;
		list_for_each_entry(tcegrp, &container->group_list, next) {
			ret = vfio_spapr_iommu_eeh_ioctl(tcegrp->grp,
					cmd, arg);
			if (ret)
				return ret;
		}
		return ret;
	}
	case VFIO_IOMMU_SPAPR_TCE_CREATE: {
		struct vfio_iommu_spapr_tce_create create;

		if (!container->v2)
			break;

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_create,
				start_addr);

		if (copy_from_user(&create, (void __user *)arg, minsz))
			return -EFAULT;

		if (create.argsz < minsz)
			return -EINVAL;

		if (create.flags)
			return -EINVAL;

		mutex_lock(&container->lock);

		ret = tce_iommu_create_default_window(container);
		if (!ret)
			ret = tce_iommu_create_window(container,
					create.page_shift,
					create.window_size, create.levels,
					&create.start_addr);

		mutex_unlock(&container->lock);

		if (!ret && copy_to_user((void __user *)arg, &create, minsz))
			ret = -EFAULT;

		return ret;
	}
	case VFIO_IOMMU_SPAPR_TCE_REMOVE: {
		struct vfio_iommu_spapr_tce_remove remove;

		if (!container->v2)
			break;

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_remove,
				start_addr);

		if (copy_from_user(&remove, (void __user *)arg, minsz))
			return -EFAULT;

		if (remove.argsz < minsz)
			return -EINVAL;

		if (remove.flags)
			return -EINVAL;

		if (container->def_window_pending && !remove.start_addr) {
			container->def_window_pending = false;
			return 0;
		}

		mutex_lock(&container->lock);

		ret = tce_iommu_remove_window(container, remove.start_addr);

		mutex_unlock(&container->lock);

		return ret;
	}
	}

	return -ENOTTY;
}

static void tce_iommu_release_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_userspace_view_free(tbl, container->mm);
		if (tbl->it_map)
			iommu_release_ownership(tbl);

		container->tables[i] = NULL;
	}
}

static int tce_iommu_take_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i, j, rc = 0;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = table_group->tables[i];

		if (!tbl || !tbl->it_map)
			continue;

		rc = iommu_take_ownership(tbl);
		if (rc) {
			for (j = 0; j < i; ++j)
				iommu_release_ownership(
						table_group->tables[j]);

			return rc;
		}
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		container->tables[i] = table_group->tables[i];

	return 0;
}

static void tce_iommu_release_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i;

	if (!table_group->ops->unset_window) {
		WARN_ON_ONCE(1);
		return;
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);
}

static long tce_iommu_take_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i, ret = 0;

	if (!table_group->ops->create_table || !table_group->ops->set_window ||
			!table_group->ops->release_ownership) {
		WARN_ON_ONCE(1);
		return -EFAULT;
	}

	table_group->ops->take_ownership(table_group);

	/* Set all windows to the new group */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		ret = table_group->ops->set_window(table_group, i, tbl);
		if (ret)
			goto release_exit;
	}

	return 0;

release_exit:
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);

	return ret;
}

static int tce_iommu_attach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	int ret = 0;
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp = NULL;

	mutex_lock(&container->lock);

	/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
			iommu_group_id(iommu_group), iommu_group); */
	table_group = iommu_group_get_iommudata(iommu_group);
	if (!table_group) {
		ret = -ENODEV;
		goto unlock_exit;
	}

	if (tce_groups_attached(container) && (!table_group->ops ||
			!table_group->ops->take_ownership ||
			!table_group->ops->release_ownership)) {
		ret = -EBUSY;
		goto unlock_exit;
	}

	/* Check if new group has the same iommu_ops (i.e. compatible) */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		struct iommu_table_group *table_group_tmp;

		if (tcegrp->grp == iommu_group) {
			pr_warn("tce_vfio: Group %d is already attached\n",
					iommu_group_id(iommu_group));
			ret = -EBUSY;
			goto unlock_exit;
		}
		table_group_tmp = iommu_group_get_iommudata(tcegrp->grp);
		if (table_group_tmp->ops->create_table !=
				table_group->ops->create_table) {
			pr_warn("tce_vfio: Group %d is incompatible with group %d\n",
					iommu_group_id(iommu_group),
					iommu_group_id(tcegrp->grp));
			ret = -EPERM;
			goto unlock_exit;
		}
	}

	tcegrp = kzalloc(sizeof(*tcegrp), GFP_KERNEL);
	if (!tcegrp) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	if (!table_group->ops || !table_group->ops->take_ownership ||
			!table_group->ops->release_ownership) {
		if (container->v2) {
			ret = -EPERM;
			goto unlock_exit;
		}
		ret = tce_iommu_take_ownership(container, table_group);
	} else {
		if (!container->v2) {
			ret = -EPERM;
			goto unlock_exit;
		}
		ret = tce_iommu_take_ownership_ddw(container, table_group);
		if (!tce_groups_attached(container) && !container->tables[0])
			container->def_window_pending = true;
	}

	if (!ret) {
		tcegrp->grp = iommu_group;
		list_add(&tcegrp->next, &container->group_list);
	}

unlock_exit:
	if (ret && tcegrp)
		kfree(tcegrp);

	mutex_unlock(&container->lock);

	return ret;
}

static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	bool found = false;
	struct tce_iommu_group *tcegrp;

	mutex_lock(&container->lock);

	list_for_each_entry(tcegrp, &container->group_list, next) {
		if (tcegrp->grp == iommu_group) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_warn("tce_vfio: detaching unattached group #%u\n",
				iommu_group_id(iommu_group));
		goto unlock_exit;
	}

	list_del(&tcegrp->next);
	kfree(tcegrp);

	table_group = iommu_group_get_iommudata(iommu_group);
	BUG_ON(!table_group);

	if (!table_group->ops || !table_group->ops->release_ownership)
		tce_iommu_release_ownership(container, table_group);
	else
		tce_iommu_release_ownership_ddw(container, table_group);

unlock_exit:
	mutex_unlock(&container->lock);
}

const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
	.name		= "iommu-vfio-powerpc",
	.owner		= THIS_MODULE,
	.open		= tce_iommu_open,
	.release	= tce_iommu_release,
	.ioctl		= tce_iommu_ioctl,
	.attach_group	= tce_iommu_attach_group,
	.detach_group	= tce_iommu_detach_group,
};

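/*
 * The ops table above is registered with the VFIO core below; userspace
 * selects this backend with ioctl(container_fd, VFIO_SET_IOMMU,
 * VFIO_SPAPR_TCE_IOMMU) (or VFIO_SPAPR_TCE_v2_IOMMU) once at least one
 * group has been added to the container.
 */
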
static int __init tce_iommu_init(void)
{
	return vfio_register_iommu_driver(&tce_iommu_driver_ops);
}

static void __exit tce_iommu_cleanup(void)
{
	vfio_unregister_iommu_driver(&tce_iommu_driver_ops);
}

module_init(tce_iommu_init);
module_exit(tce_iommu_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);