GNU Linux-libre 4.14.302-gnu1 - drivers/vfio/vfio_iommu_spapr_tce.c
/*
 * VFIO: IOMMU DMA mapping support for TCE on POWER
 *
 * Copyright (C) 2013 IBM Corp.  All rights reserved.
 *     Author: Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio_iommu_type1.c:
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/vfio.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>

#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

#define DRIVER_VERSION  "0.1"
#define DRIVER_AUTHOR   "aik@ozlabs.ru"
#define DRIVER_DESC     "VFIO IOMMU SPAPR TCE"

static void tce_iommu_detach_group(void *iommu_data,
                struct iommu_group *iommu_group);

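/*
 * Locked page accounting. Pages mapped for DMA are effectively pinned,
 * so they are charged against the owning mm's RLIMIT_MEMLOCK under
 * mmap_sem before the pinning actually happens, and uncharged when the
 * mapping goes away.
 */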
static long try_increment_locked_vm(struct mm_struct *mm, long npages)
{
        long ret = 0, locked, lock_limit;

        if (WARN_ON_ONCE(!mm))
                return -EPERM;

        if (!npages)
                return 0;

        down_write(&mm->mmap_sem);
        locked = mm->locked_vm + npages;
        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                ret = -ENOMEM;
        else
                mm->locked_vm += npages;

        pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
                        npages << PAGE_SHIFT,
                        mm->locked_vm << PAGE_SHIFT,
                        rlimit(RLIMIT_MEMLOCK),
                        ret ? " - exceeded" : "");

        up_write(&mm->mmap_sem);

        return ret;
}

static void decrement_locked_vm(struct mm_struct *mm, long npages)
{
        if (!mm || !npages)
                return;

        down_write(&mm->mmap_sem);
        if (WARN_ON_ONCE(npages > mm->locked_vm))
                npages = mm->locked_vm;
        mm->locked_vm -= npages;
        pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
                        npages << PAGE_SHIFT,
                        mm->locked_vm << PAGE_SHIFT,
                        rlimit(RLIMIT_MEMLOCK));
        up_write(&mm->mmap_sem);
}

/*
 * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
 *
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */

struct tce_iommu_group {
        struct list_head next;
        struct iommu_group *grp;
};

/*
 * A container needs to remember which preregistered regions it has
 * referenced in order to do proper cleanup at userspace process exit.
 */
struct tce_iommu_prereg {
        struct list_head next;
        struct mm_iommu_table_group_mem_t *mem;
};

/*
 * The container descriptor supports only a single group per container.
 * This is required by the API, as the container is not supplied with
 * an IOMMU group at the moment of initialization.
 */
struct tce_container {
        struct mutex lock;
        bool enabled;
        bool v2;
        bool def_window_pending;
        unsigned long locked_pages;
        struct mm_struct *mm;
        struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
        struct list_head group_list;
        struct list_head prereg_list;
};

static long tce_iommu_mm_set(struct tce_container *container)
{
        if (container->mm) {
                if (container->mm == current->mm)
                        return 0;
                return -EPERM;
        }
        BUG_ON(!current->mm);
        container->mm = current->mm;
        atomic_inc(&container->mm->mm_count);

        return 0;
}

static long tce_iommu_prereg_free(struct tce_container *container,
                struct tce_iommu_prereg *tcemem)
{
        long ret;

        ret = mm_iommu_put(container->mm, tcemem->mem);
        if (ret)
                return ret;

        list_del(&tcemem->next);
        kfree(tcemem);

        return 0;
}

static long tce_iommu_unregister_pages(struct tce_container *container,
                __u64 vaddr, __u64 size)
{
        struct mm_iommu_table_group_mem_t *mem;
        struct tce_iommu_prereg *tcemem;
        bool found = false;

        if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
                return -EINVAL;

        mem = mm_iommu_find(container->mm, vaddr, size >> PAGE_SHIFT);
        if (!mem)
                return -ENOENT;

        list_for_each_entry(tcemem, &container->prereg_list, next) {
                if (tcemem->mem == mem) {
                        found = true;
                        break;
                }
        }

        if (!found)
                return -ENOENT;

        return tce_iommu_prereg_free(container, tcemem);
}

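/*
 * Preregisters (pins) a chunk of userspace memory for the v2 IOMMU so
 * that later map requests can be translated without faulting. A region
 * already registered by this container is rejected with -EBUSY;
 * mm_iommu_get() handles sharing with other containers via refcounting.
 */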
static long tce_iommu_register_pages(struct tce_container *container,
                __u64 vaddr, __u64 size)
{
        long ret = 0;
        struct mm_iommu_table_group_mem_t *mem = NULL;
        struct tce_iommu_prereg *tcemem;
        unsigned long entries = size >> PAGE_SHIFT;

        if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
                        ((vaddr + size) < vaddr))
                return -EINVAL;

        mem = mm_iommu_find(container->mm, vaddr, entries);
        if (mem) {
                list_for_each_entry(tcemem, &container->prereg_list, next) {
                        if (tcemem->mem == mem)
                                return -EBUSY;
                }
        }

        ret = mm_iommu_get(container->mm, vaddr, entries, &mem);
        if (ret)
                return ret;

        tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
        if (!tcemem) {
                mm_iommu_put(container->mm, mem);
                return -ENOMEM;
        }

        tcemem->mem = mem;
        list_add(&tcemem->next, &container->prereg_list);

        container->enabled = true;

        return 0;
}

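/*
 * The userspace view is a per-table array holding one userspace address
 * per TCE entry; it lets tce_iommu_unuse_page_v2() find the preregistered
 * region backing an entry when that entry is cleared. The array itself
 * is charged to locked_vm as it is allocated with vzalloc().
 */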
static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl,
                struct mm_struct *mm)
{
        unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
                        tbl->it_size, PAGE_SIZE);
        unsigned long *uas;
        long ret;

        BUG_ON(tbl->it_userspace);

        ret = try_increment_locked_vm(mm, cb >> PAGE_SHIFT);
        if (ret)
                return ret;

        uas = vzalloc(cb);
        if (!uas) {
                decrement_locked_vm(mm, cb >> PAGE_SHIFT);
                return -ENOMEM;
        }
        tbl->it_userspace = uas;

        return 0;
}

static void tce_iommu_userspace_view_free(struct iommu_table *tbl,
                struct mm_struct *mm)
{
        unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
                        tbl->it_size, PAGE_SIZE);

        if (!tbl->it_userspace)
                return;

        vfree(tbl->it_userspace);
        tbl->it_userspace = NULL;
        decrement_locked_vm(mm, cb >> PAGE_SHIFT);
}

static bool tce_page_is_contained(struct page *page, unsigned page_shift)
{
        /*
         * Check that the TCE table granularity is not bigger than the size of
         * a page we just found. Otherwise the hardware can get access to
         * a bigger memory chunk than it should.
         */
        return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift;
}

static inline bool tce_groups_attached(struct tce_container *container)
{
        return !list_empty(&container->group_list);
}

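/*
 * Finds the DMA window (TCE table) covering the given bus address.
 * Returns the window number, or -1 if @ioba is outside every window.
 */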
static long tce_iommu_find_table(struct tce_container *container,
                phys_addr_t ioba, struct iommu_table **ptbl)
{
        long i;

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                struct iommu_table *tbl = container->tables[i];

                if (tbl) {
                        unsigned long entry = ioba >> tbl->it_page_shift;
                        unsigned long start = tbl->it_offset;
                        unsigned long end = start + tbl->it_size;

                        if ((start <= entry) && (entry < end)) {
                                *ptbl = tbl;
                                return i;
                        }
                }
        }

        return -1;
}

static int tce_iommu_find_free_table(struct tce_container *container)
{
        int i;

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                if (!container->tables[i])
                        return i;
        }

        return -ENOSPC;
}

static int tce_iommu_enable(struct tce_container *container)
{
        int ret = 0;
        unsigned long locked;
        struct iommu_table_group *table_group;
        struct tce_iommu_group *tcegrp;

        if (container->enabled)
                return -EBUSY;

        /*
         * When userspace pages are mapped into the IOMMU, they are effectively
         * locked memory, so, theoretically, we need to update the accounting
         * of locked pages on each map and unmap.  For powerpc, however, the
         * map/unmap paths can be very hot and the accounting would kill
         * performance, especially since it would be difficult or impossible
         * to handle the accounting in real mode only.
         *
         * To address that, rather than precisely accounting every page, we
         * instead account for a worst case on locked memory when the iommu is
         * enabled and disabled.  The worst case upper bound on locked memory
         * is the size of the whole iommu window, which is usually relatively
         * small (compared to total memory sizes) on POWER hardware.
         *
         * Also, we don't have a nice way to fail on H_PUT_TCE due to ulimits;
         * that would effectively kill the guest at random points, so it is
         * much better to enforce the limit based on the maximum that the
         * guest can map.
         *
         * Unfortunately, at the moment this counts whole tables, no matter
         * how much memory the guest actually has.  I.e. for a 4GB guest with
         * 4 IOMMU groups, each with a 2GB DMA window, 8GB will be counted
         * here.  The reason is that we cannot tell the amount of RAM used by
         * the guest at this point, as that information is only available from
         * KVM and VFIO is KVM agnostic.
         *
         * So we do not allow enabling a container without a group attached,
         * as there is no way to know by how much we should increment the
         * locked_vm counter.
         */
        if (!tce_groups_attached(container))
                return -ENODEV;

        tcegrp = list_first_entry(&container->group_list,
                        struct tce_iommu_group, next);
        table_group = iommu_group_get_iommudata(tcegrp->grp);
        if (!table_group)
                return -ENODEV;

        if (!table_group->tce32_size)
                return -EPERM;

        ret = tce_iommu_mm_set(container);
        if (ret)
                return ret;

        locked = table_group->tce32_size >> PAGE_SHIFT;
        ret = try_increment_locked_vm(container->mm, locked);
        if (ret)
                return ret;

        container->locked_pages = locked;

        container->enabled = true;

        return ret;
}

static void tce_iommu_disable(struct tce_container *container)
{
        if (!container->enabled)
                return;

        container->enabled = false;

        BUG_ON(!container->mm);
        decrement_locked_vm(container->mm, container->locked_pages);
}

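/*
 * Allocates a container. The VFIO core calls this when userspace issues
 * VFIO_SET_IOMMU with VFIO_SPAPR_TCE_IOMMU or VFIO_SPAPR_TCE_v2_IOMMU
 * on a container fd.
 */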
static void *tce_iommu_open(unsigned long arg)
{
        struct tce_container *container;

        if ((arg != VFIO_SPAPR_TCE_IOMMU) && (arg != VFIO_SPAPR_TCE_v2_IOMMU)) {
                pr_err("tce_vfio: Wrong IOMMU type\n");
                return ERR_PTR(-EINVAL);
        }

        container = kzalloc(sizeof(*container), GFP_KERNEL);
        if (!container)
                return ERR_PTR(-ENOMEM);

        mutex_init(&container->lock);
        INIT_LIST_HEAD_RCU(&container->group_list);
        INIT_LIST_HEAD_RCU(&container->prereg_list);

        container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;

        return container;
}

static int tce_iommu_clear(struct tce_container *container,
                struct iommu_table *tbl,
                unsigned long entry, unsigned long pages);
static void tce_iommu_free_table(struct tce_container *container,
                struct iommu_table *tbl);

static void tce_iommu_release(void *iommu_data)
{
        struct tce_container *container = iommu_data;
        struct tce_iommu_group *tcegrp;
        struct tce_iommu_prereg *tcemem, *tmtmp;
        long i;

        while (tce_groups_attached(container)) {
                tcegrp = list_first_entry(&container->group_list,
                                struct tce_iommu_group, next);
                tce_iommu_detach_group(iommu_data, tcegrp->grp);
        }

        /*
         * If VFIO created a table, it was not disposed of
         * by tce_iommu_detach_group(), so do it now.
         */
        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                struct iommu_table *tbl = container->tables[i];

                if (!tbl)
                        continue;

                tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
                tce_iommu_free_table(container, tbl);
        }

        list_for_each_entry_safe(tcemem, tmtmp, &container->prereg_list, next)
                WARN_ON(tce_iommu_prereg_free(container, tcemem));

        tce_iommu_disable(container);
        if (container->mm)
                mmdrop(container->mm);
        mutex_destroy(&container->lock);

        kfree(container);
}

static void tce_iommu_unuse_page(struct tce_container *container,
                unsigned long hpa)
{
        struct page *page;

        page = pfn_to_page(hpa >> PAGE_SHIFT);
        put_page(page);
}

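/*
 * Translates a userspace address into a host physical address via the
 * preregistered memory list; this fails if the address has not been
 * registered with VFIO_IOMMU_SPAPR_REGISTER_MEMORY beforehand.
 */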
static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
                unsigned long tce, unsigned long shift,
                unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
{
        long ret = 0;
        struct mm_iommu_table_group_mem_t *mem;

        mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift);
        if (!mem)
                return -EINVAL;

        ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa);
        if (ret)
                return -EINVAL;

        *pmem = mem;

        return 0;
}

static void tce_iommu_unuse_page_v2(struct tce_container *container,
                struct iommu_table *tbl, unsigned long entry)
{
        struct mm_iommu_table_group_mem_t *mem = NULL;
        int ret;
        unsigned long hpa = 0;
        unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

        if (!pua)
                return;

        ret = tce_iommu_prereg_ua_to_hpa(container, *pua, tbl->it_page_shift,
                        &hpa, &mem);
        if (ret)
                pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
                                __func__, *pua, entry, ret);
        if (mem)
                mm_iommu_mapped_dec(mem);

        *pua = 0;
}

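/*
 * Clears @pages TCE entries starting at @entry, dropping the page
 * references (v1) or the regions' mapped counters (v2) that the entries
 * used to hold.
 */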
static int tce_iommu_clear(struct tce_container *container,
                struct iommu_table *tbl,
                unsigned long entry, unsigned long pages)
{
        unsigned long oldhpa;
        long ret;
        enum dma_data_direction direction;

        for ( ; pages; --pages, ++entry) {
                direction = DMA_NONE;
                oldhpa = 0;
                ret = iommu_tce_xchg(tbl, entry, &oldhpa, &direction);
                if (ret)
                        continue;

                if (direction == DMA_NONE)
                        continue;

                if (container->v2) {
                        tce_iommu_unuse_page_v2(container, tbl, entry);
                        continue;
                }

                tce_iommu_unuse_page(container, oldhpa);
        }

        return 0;
}

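/*
 * Pins the userspace page backing @tce with get_user_pages_fast() and
 * returns its host physical address; the reference is dropped later via
 * tce_iommu_unuse_page().
 */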
static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
{
        struct page *page = NULL;
        enum dma_data_direction direction = iommu_tce_direction(tce);

        if (get_user_pages_fast(tce & PAGE_MASK, 1,
                        direction != DMA_TO_DEVICE, &page) != 1)
                return -EFAULT;

        *hpa = __pa((unsigned long) page_address(page));

        return 0;
}

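/*
 * The v1 mapping loop: pin each userspace page, check that it is at
 * least as large as the IOMMU page, then exchange it into the TCE
 * table. On failure, the entries programmed so far are rolled back
 * with tce_iommu_clear().
 */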
static long tce_iommu_build(struct tce_container *container,
                struct iommu_table *tbl,
                unsigned long entry, unsigned long tce, unsigned long pages,
                enum dma_data_direction direction)
{
        long i, ret = 0;
        struct page *page;
        unsigned long hpa;
        enum dma_data_direction dirtmp;

        for (i = 0; i < pages; ++i) {
                unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;

                ret = tce_iommu_use_page(tce, &hpa);
                if (ret)
                        break;

                page = pfn_to_page(hpa >> PAGE_SHIFT);
                if (!tce_page_is_contained(page, tbl->it_page_shift)) {
                        ret = -EPERM;
                        break;
                }

                hpa |= offset;
                dirtmp = direction;
                ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
                if (ret) {
                        tce_iommu_unuse_page(container, hpa);
                        pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
                                        __func__, entry << tbl->it_page_shift,
                                        tce, ret);
                        break;
                }

                if (dirtmp != DMA_NONE)
                        tce_iommu_unuse_page(container, hpa);

                tce += IOMMU_PAGE_SIZE(tbl);
        }

        if (ret)
                tce_iommu_clear(container, tbl, entry, i);

        return ret;
}

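/*
 * The v2 mapping loop. Unlike tce_iommu_build(), no pinning happens
 * here: the pages must have been preregistered, so the loop only
 * translates addresses, bumps each region's mapped counter and records
 * the userspace address in the table's userspace view.
 */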
static long tce_iommu_build_v2(struct tce_container *container,
                struct iommu_table *tbl,
                unsigned long entry, unsigned long tce, unsigned long pages,
                enum dma_data_direction direction)
{
        long i, ret = 0;
        struct page *page;
        unsigned long hpa;
        enum dma_data_direction dirtmp;

        if (!tbl->it_userspace) {
                ret = tce_iommu_userspace_view_alloc(tbl, container->mm);
                if (ret)
                        return ret;
        }

        for (i = 0; i < pages; ++i) {
                struct mm_iommu_table_group_mem_t *mem = NULL;
                unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl,
                                entry + i);

                ret = tce_iommu_prereg_ua_to_hpa(container,
                                tce, tbl->it_page_shift, &hpa, &mem);
                if (ret)
                        break;

                page = pfn_to_page(hpa >> PAGE_SHIFT);
                if (!tce_page_is_contained(page, tbl->it_page_shift)) {
                        ret = -EPERM;
                        break;
                }

                /* Preserve offset within IOMMU page */
                hpa |= tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
                dirtmp = direction;

                /* The registered region is being unregistered */
                if (mm_iommu_mapped_inc(mem))
                        break;

                ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
                if (ret) {
                        /* dirtmp cannot be DMA_NONE here */
                        tce_iommu_unuse_page_v2(container, tbl, entry + i);
                        pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
                                        __func__, entry << tbl->it_page_shift,
                                        tce, ret);
                        break;
                }

                if (dirtmp != DMA_NONE)
                        tce_iommu_unuse_page_v2(container, tbl, entry + i);

                *pua = tce;

                tce += IOMMU_PAGE_SIZE(tbl);
        }

        if (ret)
                tce_iommu_clear(container, tbl, entry, i);

        return ret;
}

static long tce_iommu_create_table(struct tce_container *container,
                        struct iommu_table_group *table_group,
                        int num,
                        __u32 page_shift,
                        __u64 window_size,
                        __u32 levels,
                        struct iommu_table **ptbl)
{
        long ret, table_size;

        table_size = table_group->ops->get_table_size(page_shift, window_size,
                        levels);
        if (!table_size)
                return -EINVAL;

        ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT);
        if (ret)
                return ret;

        ret = table_group->ops->create_table(table_group, num,
                        page_shift, window_size, levels, ptbl);

        WARN_ON(!ret && !(*ptbl)->it_ops->free);
        WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size));

        return ret;
}

static void tce_iommu_free_table(struct tce_container *container,
                struct iommu_table *tbl)
{
        unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;

        tce_iommu_userspace_view_free(tbl, container->mm);
        iommu_tce_table_put(tbl);
        decrement_locked_vm(container->mm, pages);
}

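/*
 * Creates a DMA window in a free slot and programs it into every
 * attached group. The groups were checked for compatibility at attach
 * time, so the first group's ops can be used for the actual creation.
 */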
static long tce_iommu_create_window(struct tce_container *container,
                __u32 page_shift, __u64 window_size, __u32 levels,
                __u64 *start_addr)
{
        struct tce_iommu_group *tcegrp;
        struct iommu_table_group *table_group;
        struct iommu_table *tbl = NULL;
        long ret, num;

        num = tce_iommu_find_free_table(container);
        if (num < 0)
                return num;

        /* Get the first group for ops::create_table */
        tcegrp = list_first_entry(&container->group_list,
                        struct tce_iommu_group, next);
        table_group = iommu_group_get_iommudata(tcegrp->grp);
        if (!table_group)
                return -EFAULT;

        if (!(table_group->pgsizes & (1ULL << page_shift)))
                return -EINVAL;

        if (!table_group->ops->set_window || !table_group->ops->unset_window ||
                        !table_group->ops->get_table_size ||
                        !table_group->ops->create_table)
                return -EPERM;

        /* Create TCE table */
        ret = tce_iommu_create_table(container, table_group, num,
                        page_shift, window_size, levels, &tbl);
        if (ret)
                return ret;

        BUG_ON(!tbl->it_ops->free);

        /*
         * Program the table to every group.
         * Groups have been tested for compatibility at the attach time.
         */
        list_for_each_entry(tcegrp, &container->group_list, next) {
                table_group = iommu_group_get_iommudata(tcegrp->grp);

                ret = table_group->ops->set_window(table_group, num, tbl);
                if (ret)
                        goto unset_exit;
        }

        container->tables[num] = tbl;

        /* Return start address assigned by platform in create_table() */
        *start_addr = tbl->it_offset << tbl->it_page_shift;

        return 0;

unset_exit:
        list_for_each_entry(tcegrp, &container->group_list, next) {
                table_group = iommu_group_get_iommudata(tcegrp->grp);
                table_group->ops->unset_window(table_group, num);
        }
        tce_iommu_free_table(container, tbl);

        return ret;
}

static long tce_iommu_remove_window(struct tce_container *container,
                __u64 start_addr)
{
        struct iommu_table_group *table_group = NULL;
        struct iommu_table *tbl;
        struct tce_iommu_group *tcegrp;
        int num;

        num = tce_iommu_find_table(container, start_addr, &tbl);
        if (num < 0)
                return -EINVAL;

        BUG_ON(!tbl->it_size);

        /* Detach groups from IOMMUs */
        list_for_each_entry(tcegrp, &container->group_list, next) {
                table_group = iommu_group_get_iommudata(tcegrp->grp);

                /*
                 * SPAPR TCE IOMMU exposes the default DMA window to
                 * the guest via dma32_window_start/size of
                 * VFIO_IOMMU_SPAPR_TCE_GET_INFO.  Some platforms allow
                 * userspace to remove this window, and some do not, so
                 * here we check for the platform capability.
                 */
                if (!table_group->ops || !table_group->ops->unset_window)
                        return -EPERM;

                table_group->ops->unset_window(table_group, num);
        }

        /* Free table */
        tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
        tce_iommu_free_table(container, tbl);
        container->tables[num] = NULL;

        return 0;
}

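/*
 * Creation of the default 32-bit window is deferred until the first
 * map/unmap/create request so that userspace which immediately removes
 * the default window (to make room for a bigger one) never pays for
 * allocating it.
 */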
static long tce_iommu_create_default_window(struct tce_container *container)
{
        long ret;
        __u64 start_addr = 0;
        struct tce_iommu_group *tcegrp;
        struct iommu_table_group *table_group;

        if (!container->def_window_pending)
                return 0;

        if (!tce_groups_attached(container))
                return -ENODEV;

        tcegrp = list_first_entry(&container->group_list,
                        struct tce_iommu_group, next);
        table_group = iommu_group_get_iommudata(tcegrp->grp);
        if (!table_group)
                return -ENODEV;

        ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
                        table_group->tce32_size, 1, &start_addr);
        WARN_ON_ONCE(!ret && start_addr);

        if (!ret)
                container->def_window_pending = false;

        return ret;
}

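/*
 * A minimal sketch of how userspace typically drives this interface in
 * the v1 flow (illustrative only; it assumes a container fd that already
 * has a group attached as described in Documentation/vfio.txt, and all
 * error handling is omitted):
 *
 *        struct vfio_iommu_spapr_tce_info info = { .argsz = sizeof(info) };
 *        struct vfio_iommu_type1_dma_map map = { .argsz = sizeof(map) };
 *
 *        ioctl(container, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
 *        ioctl(container, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
 *        ioctl(container, VFIO_IOMMU_ENABLE);
 *        map.vaddr = (__u64)(unsigned long)buf; /* page aligned buffer */
 *        map.size = size;         /* multiple of the IOMMU page size */
 *        map.iova = info.dma32_window_start;
 *        map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
 *        ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
 */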
static long tce_iommu_ioctl(void *iommu_data,
                                 unsigned int cmd, unsigned long arg)
{
        struct tce_container *container = iommu_data;
        unsigned long minsz, ddwsz;
        long ret;

        switch (cmd) {
        case VFIO_CHECK_EXTENSION:
                switch (arg) {
                case VFIO_SPAPR_TCE_IOMMU:
                case VFIO_SPAPR_TCE_v2_IOMMU:
                        ret = 1;
                        break;
                default:
                        ret = vfio_spapr_iommu_eeh_ioctl(NULL, cmd, arg);
                        break;
                }

                return (ret < 0) ? 0 : ret;
        }

        /*
         * A sanity check to prevent one userspace process from
         * manipulating another userspace process's mm.
         */
        BUG_ON(!container);
        if (container->mm && container->mm != current->mm)
                return -EPERM;

        switch (cmd) {
        case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
                struct vfio_iommu_spapr_tce_info info;
                struct tce_iommu_group *tcegrp;
                struct iommu_table_group *table_group;

                if (!tce_groups_attached(container))
                        return -ENXIO;

                tcegrp = list_first_entry(&container->group_list,
                                struct tce_iommu_group, next);
                table_group = iommu_group_get_iommudata(tcegrp->grp);

                if (!table_group)
                        return -ENXIO;

                minsz = offsetofend(struct vfio_iommu_spapr_tce_info,
                                dma32_window_size);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                info.dma32_window_start = table_group->tce32_start;
                info.dma32_window_size = table_group->tce32_size;
                info.flags = 0;
                memset(&info.ddw, 0, sizeof(info.ddw));

                if (table_group->max_dynamic_windows_supported &&
                                container->v2) {
                        info.flags |= VFIO_IOMMU_SPAPR_INFO_DDW;
                        info.ddw.pgsizes = table_group->pgsizes;
                        info.ddw.max_dynamic_windows_supported =
                                table_group->max_dynamic_windows_supported;
                        info.ddw.levels = table_group->max_levels;
                }

                ddwsz = offsetofend(struct vfio_iommu_spapr_tce_info, ddw);

                if (info.argsz >= ddwsz)
                        minsz = ddwsz;

                if (copy_to_user((void __user *)arg, &info, minsz))
                        return -EFAULT;

                return 0;
        }
        case VFIO_IOMMU_MAP_DMA: {
                struct vfio_iommu_type1_dma_map param;
                struct iommu_table *tbl = NULL;
                long num;
                enum dma_data_direction direction;

                if (!container->enabled)
                        return -EPERM;

                minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

                if (copy_from_user(&param, (void __user *)arg, minsz))
                        return -EFAULT;

                if (param.argsz < minsz)
                        return -EINVAL;

                if (param.flags & ~(VFIO_DMA_MAP_FLAG_READ |
                                VFIO_DMA_MAP_FLAG_WRITE))
                        return -EINVAL;

                ret = tce_iommu_create_default_window(container);
                if (ret)
                        return ret;

                num = tce_iommu_find_table(container, param.iova, &tbl);
                if (num < 0)
                        return -ENXIO;

                if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
                                (param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
                        return -EINVAL;

                /* iova is checked by the IOMMU API */
                if (param.flags & VFIO_DMA_MAP_FLAG_READ) {
                        if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
                                direction = DMA_BIDIRECTIONAL;
                        else
                                direction = DMA_TO_DEVICE;
                } else {
                        if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
                                direction = DMA_FROM_DEVICE;
                        else
                                return -EINVAL;
                }

                ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr);
                if (ret)
                        return ret;

                if (container->v2)
                        ret = tce_iommu_build_v2(container, tbl,
                                        param.iova >> tbl->it_page_shift,
                                        param.vaddr,
                                        param.size >> tbl->it_page_shift,
                                        direction);
                else
                        ret = tce_iommu_build(container, tbl,
                                        param.iova >> tbl->it_page_shift,
                                        param.vaddr,
                                        param.size >> tbl->it_page_shift,
                                        direction);

                iommu_flush_tce(tbl);

                return ret;
        }
        case VFIO_IOMMU_UNMAP_DMA: {
                struct vfio_iommu_type1_dma_unmap param;
                struct iommu_table *tbl = NULL;
                long num;

                if (!container->enabled)
                        return -EPERM;

                minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,
                                size);

                if (copy_from_user(&param, (void __user *)arg, minsz))
                        return -EFAULT;

                if (param.argsz < minsz)
                        return -EINVAL;

                /* No flags are supported for now */
                if (param.flags)
                        return -EINVAL;

                ret = tce_iommu_create_default_window(container);
                if (ret)
                        return ret;

                num = tce_iommu_find_table(container, param.iova, &tbl);
                if (num < 0)
                        return -ENXIO;

                if (param.size & ~IOMMU_PAGE_MASK(tbl))
                        return -EINVAL;

                ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
                                param.size >> tbl->it_page_shift);
                if (ret)
                        return ret;

                ret = tce_iommu_clear(container, tbl,
                                param.iova >> tbl->it_page_shift,
                                param.size >> tbl->it_page_shift);
                iommu_flush_tce(tbl);

                return ret;
        }
        case VFIO_IOMMU_SPAPR_REGISTER_MEMORY: {
                struct vfio_iommu_spapr_register_memory param;

                if (!container->v2)
                        break;

                minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
                                size);

                ret = tce_iommu_mm_set(container);
                if (ret)
                        return ret;

                if (copy_from_user(&param, (void __user *)arg, minsz))
                        return -EFAULT;

                if (param.argsz < minsz)
                        return -EINVAL;

                /* No flags are supported for now */
                if (param.flags)
                        return -EINVAL;

                mutex_lock(&container->lock);
                ret = tce_iommu_register_pages(container, param.vaddr,
                                param.size);
                mutex_unlock(&container->lock);

                return ret;
        }
        case VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY: {
                struct vfio_iommu_spapr_register_memory param;

                if (!container->v2)
                        break;

                if (!container->mm)
                        return -EPERM;

                minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
                                size);

                if (copy_from_user(&param, (void __user *)arg, minsz))
                        return -EFAULT;

                if (param.argsz < minsz)
                        return -EINVAL;

                /* No flags are supported for now */
                if (param.flags)
                        return -EINVAL;

                mutex_lock(&container->lock);
                ret = tce_iommu_unregister_pages(container, param.vaddr,
                                param.size);
                mutex_unlock(&container->lock);

                return ret;
        }
        case VFIO_IOMMU_ENABLE:
                if (container->v2)
                        break;

                mutex_lock(&container->lock);
                ret = tce_iommu_enable(container);
                mutex_unlock(&container->lock);
                return ret;

        case VFIO_IOMMU_DISABLE:
                if (container->v2)
                        break;

                mutex_lock(&container->lock);
                tce_iommu_disable(container);
                mutex_unlock(&container->lock);
                return 0;

        case VFIO_EEH_PE_OP: {
                struct tce_iommu_group *tcegrp;

                ret = 0;
                list_for_each_entry(tcegrp, &container->group_list, next) {
                        ret = vfio_spapr_iommu_eeh_ioctl(tcegrp->grp,
                                        cmd, arg);
                        if (ret)
                                return ret;
                }
                return ret;
        }

        case VFIO_IOMMU_SPAPR_TCE_CREATE: {
                struct vfio_iommu_spapr_tce_create create;

                if (!container->v2)
                        break;

                ret = tce_iommu_mm_set(container);
                if (ret)
                        return ret;

                if (!tce_groups_attached(container))
                        return -ENXIO;

                minsz = offsetofend(struct vfio_iommu_spapr_tce_create,
                                start_addr);

                if (copy_from_user(&create, (void __user *)arg, minsz))
                        return -EFAULT;

                if (create.argsz < minsz)
                        return -EINVAL;

                if (create.flags)
                        return -EINVAL;

                mutex_lock(&container->lock);

                ret = tce_iommu_create_default_window(container);
                if (!ret)
                        ret = tce_iommu_create_window(container,
                                        create.page_shift,
                                        create.window_size, create.levels,
                                        &create.start_addr);

                mutex_unlock(&container->lock);

                if (!ret && copy_to_user((void __user *)arg, &create, minsz))
                        ret = -EFAULT;

                return ret;
        }
        case VFIO_IOMMU_SPAPR_TCE_REMOVE: {
                struct vfio_iommu_spapr_tce_remove remove;

                if (!container->v2)
                        break;

                ret = tce_iommu_mm_set(container);
                if (ret)
                        return ret;

                if (!tce_groups_attached(container))
                        return -ENXIO;

                minsz = offsetofend(struct vfio_iommu_spapr_tce_remove,
                                start_addr);

                if (copy_from_user(&remove, (void __user *)arg, minsz))
                        return -EFAULT;

                if (remove.argsz < minsz)
                        return -EINVAL;

                if (remove.flags)
                        return -EINVAL;

                if (container->def_window_pending && !remove.start_addr) {
                        container->def_window_pending = false;
                        return 0;
                }

                mutex_lock(&container->lock);

                ret = tce_iommu_remove_window(container, remove.start_addr);

                mutex_unlock(&container->lock);

                return ret;
        }
        }

        return -ENOTTY;
}

static void tce_iommu_release_ownership(struct tce_container *container,
                struct iommu_table_group *table_group)
{
        int i;

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                struct iommu_table *tbl = container->tables[i];

                if (!tbl)
                        continue;

                tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
                tce_iommu_userspace_view_free(tbl, container->mm);
                if (tbl->it_map)
                        iommu_release_ownership(tbl);

                container->tables[i] = NULL;
        }
}

static int tce_iommu_take_ownership(struct tce_container *container,
                struct iommu_table_group *table_group)
{
        int i, j, rc = 0;

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                struct iommu_table *tbl = table_group->tables[i];

                if (!tbl || !tbl->it_map)
                        continue;

                rc = iommu_take_ownership(tbl);
                if (rc) {
                        for (j = 0; j < i; ++j)
                                iommu_release_ownership(
                                                table_group->tables[j]);

                        return rc;
                }
        }

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
                container->tables[i] = table_group->tables[i];

        return 0;
}

static void tce_iommu_release_ownership_ddw(struct tce_container *container,
                struct iommu_table_group *table_group)
{
        long i;

        if (!table_group->ops->unset_window) {
                WARN_ON_ONCE(1);
                return;
        }

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
                table_group->ops->unset_window(table_group, i);

        table_group->ops->release_ownership(table_group);
}

static long tce_iommu_take_ownership_ddw(struct tce_container *container,
                struct iommu_table_group *table_group)
{
        long i, ret = 0;

        if (!table_group->ops->create_table || !table_group->ops->set_window ||
                        !table_group->ops->release_ownership) {
                WARN_ON_ONCE(1);
                return -EFAULT;
        }

        table_group->ops->take_ownership(table_group);

        /* Set all windows to the new group */
        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                struct iommu_table *tbl = container->tables[i];

                if (!tbl)
                        continue;

                ret = table_group->ops->set_window(table_group, i, tbl);
                if (ret)
                        goto release_exit;
        }

        return 0;

release_exit:
        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
                table_group->ops->unset_window(table_group, i);

        table_group->ops->release_ownership(table_group);

        return ret;
}

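/*
 * Attaching works in one of two modes: groups without dynamic DMA
 * window (DDW) ops hand over their firmware-programmed tables via
 * tce_iommu_take_ownership() (v1 only), while DDW-capable groups
 * release hardware ownership so that the container can create and
 * program its own tables (v2 only).
 */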
static int tce_iommu_attach_group(void *iommu_data,
                struct iommu_group *iommu_group)
{
        int ret;
        struct tce_container *container = iommu_data;
        struct iommu_table_group *table_group;
        struct tce_iommu_group *tcegrp = NULL;

        mutex_lock(&container->lock);

        /* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
                        iommu_group_id(iommu_group), iommu_group); */
        table_group = iommu_group_get_iommudata(iommu_group);
        if (!table_group) {
                ret = -ENODEV;
                goto unlock_exit;
        }

        if (tce_groups_attached(container) && (!table_group->ops ||
                        !table_group->ops->take_ownership ||
                        !table_group->ops->release_ownership)) {
                ret = -EBUSY;
                goto unlock_exit;
        }

        /* Check if new group has the same iommu_ops (i.e. compatible) */
        list_for_each_entry(tcegrp, &container->group_list, next) {
                struct iommu_table_group *table_group_tmp;

                if (tcegrp->grp == iommu_group) {
                        pr_warn("tce_vfio: Group %d is already attached\n",
                                        iommu_group_id(iommu_group));
                        ret = -EBUSY;
                        goto unlock_exit;
                }
                table_group_tmp = iommu_group_get_iommudata(tcegrp->grp);
                if (table_group_tmp->ops->create_table !=
                                table_group->ops->create_table) {
                        pr_warn("tce_vfio: Group %d is incompatible with group %d\n",
                                        iommu_group_id(iommu_group),
                                        iommu_group_id(tcegrp->grp));
                        ret = -EPERM;
                        goto unlock_exit;
                }
        }

        tcegrp = kzalloc(sizeof(*tcegrp), GFP_KERNEL);
        if (!tcegrp) {
                ret = -ENOMEM;
                goto unlock_exit;
        }

        if (!table_group->ops || !table_group->ops->take_ownership ||
                        !table_group->ops->release_ownership) {
                if (container->v2) {
                        ret = -EPERM;
                        goto unlock_exit;
                }
                ret = tce_iommu_take_ownership(container, table_group);
        } else {
                if (!container->v2) {
                        ret = -EPERM;
                        goto unlock_exit;
                }
                ret = tce_iommu_take_ownership_ddw(container, table_group);
                if (!tce_groups_attached(container) && !container->tables[0])
                        container->def_window_pending = true;
        }

        if (!ret) {
                tcegrp->grp = iommu_group;
                list_add(&tcegrp->next, &container->group_list);
        }

unlock_exit:
        if (ret && tcegrp)
                kfree(tcegrp);

        mutex_unlock(&container->lock);

        return ret;
}

static void tce_iommu_detach_group(void *iommu_data,
                struct iommu_group *iommu_group)
{
        struct tce_container *container = iommu_data;
        struct iommu_table_group *table_group;
        bool found = false;
        struct tce_iommu_group *tcegrp;

        mutex_lock(&container->lock);

        list_for_each_entry(tcegrp, &container->group_list, next) {
                if (tcegrp->grp == iommu_group) {
                        found = true;
                        break;
                }
        }

        if (!found) {
                pr_warn("tce_vfio: detaching unattached group #%u\n",
                                iommu_group_id(iommu_group));
                goto unlock_exit;
        }

        list_del(&tcegrp->next);
        kfree(tcegrp);

        table_group = iommu_group_get_iommudata(iommu_group);
        BUG_ON(!table_group);

        if (!table_group->ops || !table_group->ops->release_ownership)
                tce_iommu_release_ownership(container, table_group);
        else
                tce_iommu_release_ownership_ddw(container, table_group);

unlock_exit:
        mutex_unlock(&container->lock);
}

const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
        .name           = "iommu-vfio-powerpc",
        .owner          = THIS_MODULE,
        .open           = tce_iommu_open,
        .release        = tce_iommu_release,
        .ioctl          = tce_iommu_ioctl,
        .attach_group   = tce_iommu_attach_group,
        .detach_group   = tce_iommu_detach_group,
};

static int __init tce_iommu_init(void)
{
        return vfio_register_iommu_driver(&tce_iommu_driver_ops);
}

static void __exit tce_iommu_cleanup(void)
{
        vfio_unregister_iommu_driver(&tce_iommu_driver_ops);
}

module_init(tce_iommu_init);
module_exit(tce_iommu_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);