/* GNU Linux-libre 4.9.290-gnu1: drivers/vfio/vfio_iommu_spapr_tce.c */
/*
 * VFIO: IOMMU DMA mapping support for TCE on POWER
 *
 * Copyright (C) 2013 IBM Corp.  All rights reserved.
 *     Author: Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio_iommu_type1.c:
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/vfio.h>
#include <linux/vmalloc.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

#define DRIVER_VERSION  "0.1"
#define DRIVER_AUTHOR   "aik@ozlabs.ru"
#define DRIVER_DESC     "VFIO IOMMU SPAPR TCE"

static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group);

static long try_increment_locked_vm(struct mm_struct *mm, long npages)
{
	long ret = 0, locked, lock_limit;

	if (WARN_ON_ONCE(!mm))
		return -EPERM;

	if (!npages)
		return 0;

	down_write(&mm->mmap_sem);
	locked = mm->locked_vm + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK))
		ret = -ENOMEM;
	else
		mm->locked_vm += npages;

	pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK),
			ret ? " - exceeded" : "");

	up_write(&mm->mmap_sem);

	return ret;
}

static void decrement_locked_vm(struct mm_struct *mm, long npages)
{
	if (!mm || !npages)
		return;

	down_write(&mm->mmap_sem);
	if (WARN_ON_ONCE(npages > mm->locked_vm))
		npages = mm->locked_vm;
	mm->locked_vm -= npages;
	pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK));
	up_write(&mm->mmap_sem);
}
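
/*
 * Illustrative arithmetic for the accounting above (an editorial example,
 * not from the original source): with 64KiB system pages (PAGE_SHIFT = 16)
 * and a 16GiB RLIMIT_MEMLOCK, lock_limit is 16GiB >> 16 = 262144 pages.
 * Pinning a 2GiB DMA window requests npages = 2GiB >> 16 = 32768 pages, so
 * the request succeeds as long as locked_vm + 32768 stays at or below
 * 262144, or the task has CAP_IPC_LOCK.
 */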

/*
 * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
 *
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */
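
/*
 * Illustrative userspace flow (a sketch based on Documentation/vfio.txt,
 * not part of the original source); error handling and group/device setup
 * details are omitted, and the group number 26 and buffer are hypothetical:
 *
 *	int container = open("/dev/vfio/vfio", O_RDWR);
 *	int group = open("/dev/vfio/26", O_RDWR);
 *
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
 *	ioctl(container, VFIO_IOMMU_ENABLE);
 *
 *	struct vfio_iommu_spapr_tce_info info = { .argsz = sizeof(info) };
 *	ioctl(container, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
 *
 *	struct vfio_iommu_type1_dma_map map = {
 *		.argsz = sizeof(map),
 *		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *		.vaddr = (__u64)buf,
 *		.iova  = info.dma32_window_start,
 *		.size  = 1024 * 1024,
 *	};
 *	ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
 */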

struct tce_iommu_group {
	struct list_head next;
	struct iommu_group *grp;
};

/*
 * A container needs to remember which preregistered regions it has
 * referenced in order to do proper cleanup at userspace process exit.
 */
struct tce_iommu_prereg {
	struct list_head next;
	struct mm_iommu_table_group_mem_t *mem;
};

/*
 * The container descriptor supports only a single group per container.
 * Required by the API as the container is not supplied with the IOMMU group
 * at the moment of initialization.
 */
struct tce_container {
	struct mutex lock;
	bool enabled;
	bool v2;
	bool def_window_pending;
	unsigned long locked_pages;
	struct mm_struct *mm;
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
	struct list_head group_list;
	struct list_head prereg_list;
};

static long tce_iommu_mm_set(struct tce_container *container)
{
	if (container->mm) {
		if (container->mm == current->mm)
			return 0;
		return -EPERM;
	}
	BUG_ON(!current->mm);
	container->mm = current->mm;
	atomic_inc(&container->mm->mm_count);

	return 0;
}

static long tce_iommu_prereg_free(struct tce_container *container,
		struct tce_iommu_prereg *tcemem)
{
	long ret;

	ret = mm_iommu_put(container->mm, tcemem->mem);
	if (ret)
		return ret;

	list_del(&tcemem->next);
	kfree(tcemem);

	return 0;
}

static long tce_iommu_unregister_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	struct mm_iommu_table_group_mem_t *mem;
	struct tce_iommu_prereg *tcemem;
	bool found = false;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
		return -EINVAL;

	mem = mm_iommu_find(container->mm, vaddr, size >> PAGE_SHIFT);
	if (!mem)
		return -ENOENT;

	list_for_each_entry(tcemem, &container->prereg_list, next) {
		if (tcemem->mem == mem) {
			found = true;
			break;
		}
	}

	if (!found)
		return -ENOENT;

	return tce_iommu_prereg_free(container, tcemem);
}

static long tce_iommu_register_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem = NULL;
	struct tce_iommu_prereg *tcemem;
	unsigned long entries = size >> PAGE_SHIFT;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
			((vaddr + size) < vaddr))
		return -EINVAL;

	mem = mm_iommu_find(container->mm, vaddr, entries);
	if (mem) {
		list_for_each_entry(tcemem, &container->prereg_list, next) {
			if (tcemem->mem == mem)
				return -EBUSY;
		}
	}

	ret = mm_iommu_get(container->mm, vaddr, entries, &mem);
	if (ret)
		return ret;

	tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
	if (!tcemem) {
		mm_iommu_put(container->mm, mem);
		return -ENOMEM;
	}

	tcemem->mem = mem;
	list_add(&tcemem->next, &container->prereg_list);

	container->enabled = true;

	return 0;
}
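
/*
 * Illustrative v2 preregistration call (an editorial sketch, not part of
 * the original source): userspace registers the pages backing a DMA-able
 * buffer once so that later map/unmap operations avoid per-page pinning
 * costs. Both vaddr and size must be page aligned; buf and bufsize are
 * hypothetical:
 *
 *	struct vfio_iommu_spapr_register_memory reg = {
 *		.argsz = sizeof(reg),
 *		.flags = 0,
 *		.vaddr = (__u64)buf,
 *		.size  = bufsize,
 *	};
 *	ioctl(container, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
 */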

static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl,
		struct mm_struct *mm)
{
	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
			tbl->it_size, PAGE_SIZE);
	unsigned long *uas;
	long ret;

	BUG_ON(tbl->it_userspace);

	ret = try_increment_locked_vm(mm, cb >> PAGE_SHIFT);
	if (ret)
		return ret;

	uas = vzalloc(cb);
	if (!uas) {
		decrement_locked_vm(mm, cb >> PAGE_SHIFT);
		return -ENOMEM;
	}
	tbl->it_userspace = uas;

	return 0;
}

static void tce_iommu_userspace_view_free(struct iommu_table *tbl,
		struct mm_struct *mm)
{
	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
			tbl->it_size, PAGE_SIZE);

	if (!tbl->it_userspace)
		return;

	vfree(tbl->it_userspace);
	tbl->it_userspace = NULL;
	decrement_locked_vm(mm, cb >> PAGE_SHIFT);
}

static bool tce_page_is_contained(struct page *page, unsigned page_shift)
{
	/*
	 * Check that the TCE table granularity is not bigger than the size of
	 * a page we just found. Otherwise the hardware can get access to
	 * a bigger memory chunk than it should.
	 */
	return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift;
}
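
/*
 * Worked example for the check above (an editorial illustration, not from
 * the original source): with 64KiB system pages (PAGE_SHIFT = 16) and a
 * 16MiB compound page (order 8), PAGE_SHIFT + compound_order() = 24, so any
 * TCE granularity up to 16MiB (page_shift <= 24) is contained. A base page
 * (order 0) backing a 16MiB TCE entry fails the check: 16 < 24.
 */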

static inline bool tce_groups_attached(struct tce_container *container)
{
	return !list_empty(&container->group_list);
}

static long tce_iommu_find_table(struct tce_container *container,
		phys_addr_t ioba, struct iommu_table **ptbl)
{
	long i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (tbl) {
			unsigned long entry = ioba >> tbl->it_page_shift;
			unsigned long start = tbl->it_offset;
			unsigned long end = start + tbl->it_size;

			if ((start <= entry) && (entry < end)) {
				*ptbl = tbl;
				return i;
			}
		}
	}

	return -1;
}

static int tce_iommu_find_free_table(struct tce_container *container)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		if (!container->tables[i])
			return i;
	}

	return -ENOSPC;
}

static int tce_iommu_enable(struct tce_container *container)
{
	int ret = 0;
	unsigned long locked;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp;

	if (container->enabled)
		return -EBUSY;

	/*
	 * When userspace pages are mapped into the IOMMU, they are effectively
	 * locked memory, so, theoretically, we need to update the accounting
	 * of locked pages on each map and unmap. For powerpc, though, the
	 * map/unmap paths can be very hot and the accounting would kill
	 * performance, especially since it would be difficult or impossible
	 * to handle the accounting in real mode only.
	 *
	 * To address that, rather than precisely accounting every page, we
	 * instead account for a worst case on locked memory when the iommu is
	 * enabled and disabled. The worst case upper bound on locked memory
	 * is the size of the whole iommu window, which is usually relatively
	 * small (compared to total memory sizes) on POWER hardware.
	 *
	 * Also, we don't have a nice way to fail on H_PUT_TCE due to ulimits;
	 * that would effectively kill the guest at random points, so it is
	 * much better to enforce the limit based on the maximum that the
	 * guest can map.
	 *
	 * Unfortunately, at the moment it counts whole tables, no matter how
	 * much memory the guest has, i.e. for a 4GB guest and 4 IOMMU groups,
	 * each with a 2GB DMA window, 8GB will be counted here. The reason is
	 * that we cannot tell here the amount of RAM used by the guest, as
	 * this information is only available from KVM and VFIO is
	 * KVM agnostic.
	 *
	 * So we do not allow enabling a container without a group attached
	 * as there is no way to know how much we should increment
	 * the locked_vm counter.
	 */
	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	if (!table_group->tce32_size)
		return -EPERM;

	ret = tce_iommu_mm_set(container);
	if (ret)
		return ret;

	locked = table_group->tce32_size >> PAGE_SHIFT;
	ret = try_increment_locked_vm(container->mm, locked);
	if (ret)
		return ret;

	container->locked_pages = locked;

	container->enabled = true;

	return ret;
}

static void tce_iommu_disable(struct tce_container *container)
{
	if (!container->enabled)
		return;

	container->enabled = false;

	BUG_ON(!container->mm);
	decrement_locked_vm(container->mm, container->locked_pages);
}

static void *tce_iommu_open(unsigned long arg)
{
	struct tce_container *container;

	if ((arg != VFIO_SPAPR_TCE_IOMMU) && (arg != VFIO_SPAPR_TCE_v2_IOMMU)) {
		pr_err("tce_vfio: Wrong IOMMU type\n");
		return ERR_PTR(-EINVAL);
	}

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	mutex_init(&container->lock);
	INIT_LIST_HEAD_RCU(&container->group_list);
	INIT_LIST_HEAD_RCU(&container->prereg_list);

	container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;

	return container;
}

static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages);
static void tce_iommu_free_table(struct tce_container *container,
		struct iommu_table *tbl);

static void tce_iommu_release(void *iommu_data)
{
	struct tce_container *container = iommu_data;
	struct tce_iommu_group *tcegrp;
	struct tce_iommu_prereg *tcemem, *tmtmp;
	long i;

	while (tce_groups_attached(container)) {
		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		tce_iommu_detach_group(iommu_data, tcegrp->grp);
	}

	/*
	 * If VFIO created a table, it was not disposed
	 * by tce_iommu_detach_group() so do it now.
	 */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_free_table(container, tbl);
	}

	list_for_each_entry_safe(tcemem, tmtmp, &container->prereg_list, next)
		WARN_ON(tce_iommu_prereg_free(container, tcemem));

	tce_iommu_disable(container);
	if (container->mm)
		mmdrop(container->mm);
	mutex_destroy(&container->lock);

	kfree(container);
}

static void tce_iommu_unuse_page(struct tce_container *container,
		unsigned long hpa)
{
	struct page *page;

	page = pfn_to_page(hpa >> PAGE_SHIFT);
	put_page(page);
}

static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
		unsigned long tce, unsigned long size,
		unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem;

	mem = mm_iommu_lookup(container->mm, tce, size);
	if (!mem)
		return -EINVAL;

	ret = mm_iommu_ua_to_hpa(mem, tce, phpa);
	if (ret)
		return -EINVAL;

	*pmem = mem;

	return 0;
}

static void tce_iommu_unuse_page_v2(struct tce_container *container,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	int ret;
	unsigned long hpa = 0;
	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

	if (!pua)
		return;

	ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl),
			&hpa, &mem);
	if (ret)
		pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
				__func__, *pua, entry, ret);
	if (mem)
		mm_iommu_mapped_dec(mem);

	*pua = 0;
}

static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	unsigned long oldhpa;
	long ret;
	enum dma_data_direction direction;

	for ( ; pages; --pages, ++entry) {
		direction = DMA_NONE;
		oldhpa = 0;
		ret = iommu_tce_xchg(tbl, entry, &oldhpa, &direction);
		if (ret)
			continue;

		if (direction == DMA_NONE)
			continue;

		if (container->v2) {
			tce_iommu_unuse_page_v2(container, tbl, entry);
			continue;
		}

		tce_iommu_unuse_page(container, oldhpa);
	}

	return 0;
}

static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
{
	struct page *page = NULL;
	enum dma_data_direction direction = iommu_tce_direction(tce);

	if (get_user_pages_fast(tce & PAGE_MASK, 1,
			direction != DMA_TO_DEVICE, &page) != 1)
		return -EFAULT;

	*hpa = __pa((unsigned long) page_address(page));

	return 0;
}
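
/*
 * Note (an editorial sketch, not from the original source): the permission
 * bits encoded in the tce value drive iommu_tce_direction() above, e.g. a
 * tce with both TCE_PCI_READ and TCE_PCI_WRITE set is expected to map to
 * DMA_BIDIRECTIONAL, TCE_PCI_READ alone to DMA_TO_DEVICE, TCE_PCI_WRITE
 * alone to DMA_FROM_DEVICE, and neither bit to DMA_NONE.
 */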

static long tce_iommu_build(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	struct page *page;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;

		ret = tce_iommu_use_page(tce, &hpa);
		if (ret)
			break;

		page = pfn_to_page(hpa >> PAGE_SHIFT);
		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		hpa |= offset;
		dirtmp = direction;
		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
		if (ret) {
			tce_iommu_unuse_page(container, hpa);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page(container, hpa);

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}

static long tce_iommu_build_v2(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	struct page *page;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	if (!tbl->it_userspace) {
		ret = tce_iommu_userspace_view_alloc(tbl, container->mm);
		if (ret)
			return ret;
	}

	for (i = 0; i < pages; ++i) {
		struct mm_iommu_table_group_mem_t *mem = NULL;
		unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl,
				entry + i);

		ret = tce_iommu_prereg_ua_to_hpa(container,
				tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem);
		if (ret)
			break;

		page = pfn_to_page(hpa >> PAGE_SHIFT);
		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		/* Preserve offset within IOMMU page */
		hpa |= tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
		dirtmp = direction;

		/* The registered region is being unregistered */
		if (mm_iommu_mapped_inc(mem))
			break;

		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
		if (ret) {
			/* dirtmp cannot be DMA_NONE here */
			tce_iommu_unuse_page_v2(container, tbl, entry + i);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page_v2(container, tbl, entry + i);

		*pua = tce;

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}

static long tce_iommu_create_table(struct tce_container *container,
			struct iommu_table_group *table_group,
			int num,
			__u32 page_shift,
			__u64 window_size,
			__u32 levels,
			struct iommu_table **ptbl)
{
	long ret, table_size;

	table_size = table_group->ops->get_table_size(page_shift, window_size,
			levels);
	if (!table_size)
		return -EINVAL;

	ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	ret = table_group->ops->create_table(table_group, num,
			page_shift, window_size, levels, ptbl);

	WARN_ON(!ret && !(*ptbl)->it_ops->free);
	WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size));

	return ret;
}

static void tce_iommu_free_table(struct tce_container *container,
		struct iommu_table *tbl)
{
	unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;

	tce_iommu_userspace_view_free(tbl, container->mm);
	tbl->it_ops->free(tbl);
	decrement_locked_vm(container->mm, pages);
}

static long tce_iommu_create_window(struct tce_container *container,
		__u32 page_shift, __u64 window_size, __u32 levels,
		__u64 *start_addr)
{
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;
	struct iommu_table *tbl = NULL;
	long ret, num;

	num = tce_iommu_find_free_table(container);
	if (num < 0)
		return num;

	/* Get the first group for ops::create_table */
	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -EFAULT;

	if (!(table_group->pgsizes & (1ULL << page_shift)))
		return -EINVAL;

	if (!table_group->ops->set_window || !table_group->ops->unset_window ||
			!table_group->ops->get_table_size ||
			!table_group->ops->create_table)
		return -EPERM;

	/* Create TCE table */
	ret = tce_iommu_create_table(container, table_group, num,
			page_shift, window_size, levels, &tbl);
	if (ret)
		return ret;

	BUG_ON(!tbl->it_ops->free);

	/*
	 * Program the table to every group.
	 * Groups have been tested for compatibility at attach time.
	 */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		ret = table_group->ops->set_window(table_group, num, tbl);
		if (ret)
			goto unset_exit;
	}

	container->tables[num] = tbl;

	/* Return start address assigned by platform in create_table() */
	*start_addr = tbl->it_offset << tbl->it_page_shift;

	return 0;

unset_exit:
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);
		table_group->ops->unset_window(table_group, num);
	}
	tce_iommu_free_table(container, tbl);

	return ret;
}

static long tce_iommu_remove_window(struct tce_container *container,
		__u64 start_addr)
{
	struct iommu_table_group *table_group = NULL;
	struct iommu_table *tbl;
	struct tce_iommu_group *tcegrp;
	int num;

	num = tce_iommu_find_table(container, start_addr, &tbl);
	if (num < 0)
		return -EINVAL;

	BUG_ON(!tbl->it_size);

	/* Detach groups from IOMMUs */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		/*
		 * SPAPR TCE IOMMU exposes the default DMA window to
		 * the guest via dma32_window_start/size of
		 * VFIO_IOMMU_SPAPR_TCE_GET_INFO. Some platforms allow
		 * the userspace to remove this window, some do not, so
		 * here we check for the platform capability.
		 */
		if (!table_group->ops || !table_group->ops->unset_window)
			return -EPERM;

		table_group->ops->unset_window(table_group, num);
	}

	/* Free table */
	tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
	tce_iommu_free_table(container, tbl);
	container->tables[num] = NULL;

	return 0;
}

static long tce_iommu_create_default_window(struct tce_container *container)
{
	long ret;
	__u64 start_addr = 0;
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;

	if (!container->def_window_pending)
		return 0;

	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
			table_group->tce32_size, 1, &start_addr);
	WARN_ON_ONCE(!ret && start_addr);

	if (!ret)
		container->def_window_pending = false;

	return ret;
}

static long tce_iommu_ioctl(void *iommu_data,
				 unsigned int cmd, unsigned long arg)
{
	struct tce_container *container = iommu_data;
	unsigned long minsz, ddwsz;
	long ret;

	switch (cmd) {
	case VFIO_CHECK_EXTENSION:
		switch (arg) {
		case VFIO_SPAPR_TCE_IOMMU:
		case VFIO_SPAPR_TCE_v2_IOMMU:
			ret = 1;
			break;
		default:
			ret = vfio_spapr_iommu_eeh_ioctl(NULL, cmd, arg);
			break;
		}

		return (ret < 0) ? 0 : ret;
	}

	/*
	 * Sanity check to prevent one userspace from manipulating
	 * another userspace's mm.
	 */
	BUG_ON(!container);
	if (container->mm && container->mm != current->mm)
		return -EPERM;

	switch (cmd) {
	case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
		struct vfio_iommu_spapr_tce_info info;
		struct tce_iommu_group *tcegrp;
		struct iommu_table_group *table_group;

		if (!tce_groups_attached(container))
			return -ENXIO;

		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		if (!table_group)
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_info,
				dma32_window_size);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.dma32_window_start = table_group->tce32_start;
		info.dma32_window_size = table_group->tce32_size;
		info.flags = 0;
		memset(&info.ddw, 0, sizeof(info.ddw));

		if (table_group->max_dynamic_windows_supported &&
				container->v2) {
			info.flags |= VFIO_IOMMU_SPAPR_INFO_DDW;
			info.ddw.pgsizes = table_group->pgsizes;
			info.ddw.max_dynamic_windows_supported =
				table_group->max_dynamic_windows_supported;
			info.ddw.levels = table_group->max_levels;
		}

		ddwsz = offsetofend(struct vfio_iommu_spapr_tce_info, ddw);

		if (info.argsz >= ddwsz)
			minsz = ddwsz;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_IOMMU_MAP_DMA: {
		struct vfio_iommu_type1_dma_map param;
		struct iommu_table *tbl = NULL;
		long num;
		enum dma_data_direction direction;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		if (param.flags & ~(VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE))
			return -EINVAL;

		ret = tce_iommu_create_default_window(container);
		if (ret)
			return ret;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
				(param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
			return -EINVAL;

		/* iova is checked by the IOMMU API */
		if (param.flags & VFIO_DMA_MAP_FLAG_READ) {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_BIDIRECTIONAL;
			else
				direction = DMA_TO_DEVICE;
		} else {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_FROM_DEVICE;
			else
				return -EINVAL;
		}

		ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr);
		if (ret)
			return ret;

		if (container->v2)
			ret = tce_iommu_build_v2(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);
		else
			ret = tce_iommu_build(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);

		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_UNMAP_DMA: {
		struct vfio_iommu_type1_dma_unmap param;
		struct iommu_table *tbl = NULL;
		long num;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flags are supported for now */
		if (param.flags)
			return -EINVAL;

		ret = tce_iommu_create_default_window(container);
		if (ret)
			return ret;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if (param.size & ~IOMMU_PAGE_MASK(tbl))
			return -EINVAL;

		ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
				param.size >> tbl->it_page_shift);
		if (ret)
			return ret;

		ret = tce_iommu_clear(container, tbl,
				param.iova >> tbl->it_page_shift,
				param.size >> tbl->it_page_shift);
		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_REGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flags are supported for now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_register_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		if (!container->mm)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flags are supported for now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_unregister_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_ENABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		ret = tce_iommu_enable(container);
		mutex_unlock(&container->lock);
		return ret;

	case VFIO_IOMMU_DISABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		tce_iommu_disable(container);
		mutex_unlock(&container->lock);
		return 0;

	case VFIO_EEH_PE_OP: {
		struct tce_iommu_group *tcegrp;

		ret = 0;
		list_for_each_entry(tcegrp, &container->group_list, next) {
			ret = vfio_spapr_iommu_eeh_ioctl(tcegrp->grp,
					cmd, arg);
			if (ret)
				return ret;
		}
		return ret;
	}

	case VFIO_IOMMU_SPAPR_TCE_CREATE: {
		struct vfio_iommu_spapr_tce_create create;

		if (!container->v2)
			break;

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_create,
				start_addr);

		if (copy_from_user(&create, (void __user *)arg, minsz))
			return -EFAULT;

		if (create.argsz < minsz)
			return -EINVAL;

		if (create.flags)
			return -EINVAL;

		mutex_lock(&container->lock);

		ret = tce_iommu_create_default_window(container);
		if (!ret)
			ret = tce_iommu_create_window(container,
					create.page_shift,
					create.window_size, create.levels,
					&create.start_addr);

		mutex_unlock(&container->lock);

		if (!ret && copy_to_user((void __user *)arg, &create, minsz))
			ret = -EFAULT;

		return ret;
	}
	case VFIO_IOMMU_SPAPR_TCE_REMOVE: {
		struct vfio_iommu_spapr_tce_remove remove;

		if (!container->v2)
			break;

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_remove,
				start_addr);

		if (copy_from_user(&remove, (void __user *)arg, minsz))
			return -EFAULT;

		if (remove.argsz < minsz)
			return -EINVAL;

		if (remove.flags)
			return -EINVAL;

		if (container->def_window_pending && !remove.start_addr) {
			container->def_window_pending = false;
			return 0;
		}

		mutex_lock(&container->lock);

		ret = tce_iommu_remove_window(container, remove.start_addr);

		mutex_unlock(&container->lock);

		return ret;
	}
	}

	return -ENOTTY;
}
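
/*
 * Illustrative v2 dynamic DMA window (DDW) flow for the two ioctls above
 * (an editorial sketch, not part of the original source); assumes a
 * DDW-capable platform, a hypothetical 8GiB window, and omits error
 * handling:
 *
 *	struct vfio_iommu_spapr_tce_create create = {
 *		.argsz = sizeof(create),
 *		.page_shift = 16,
 *		.window_size = 1ULL << 33,
 *		.levels = 1,
 *	};
 *	ioctl(container, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
 *	// create.start_addr now holds the bus address of the new window
 *
 *	struct vfio_iommu_spapr_tce_remove remove = {
 *		.argsz = sizeof(remove),
 *		.start_addr = create.start_addr,
 *	};
 *	ioctl(container, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
 */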

static void tce_iommu_release_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_userspace_view_free(tbl, container->mm);
		if (tbl->it_map)
			iommu_release_ownership(tbl);

		container->tables[i] = NULL;
	}
}

static int tce_iommu_take_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i, j, rc = 0;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = table_group->tables[i];

		if (!tbl || !tbl->it_map)
			continue;

		rc = iommu_take_ownership(tbl);
		if (rc) {
			for (j = 0; j < i; ++j)
				iommu_release_ownership(
						table_group->tables[j]);

			return rc;
		}
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		container->tables[i] = table_group->tables[i];

	return 0;
}

static void tce_iommu_release_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i;

	if (!table_group->ops->unset_window) {
		WARN_ON_ONCE(1);
		return;
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);
}

static long tce_iommu_take_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i, ret = 0;

	if (!table_group->ops->create_table || !table_group->ops->set_window ||
			!table_group->ops->release_ownership) {
		WARN_ON_ONCE(1);
		return -EFAULT;
	}

	table_group->ops->take_ownership(table_group);

	/* Set all windows to the new group */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		ret = table_group->ops->set_window(table_group, i, tbl);
		if (ret)
			goto release_exit;
	}

	return 0;

release_exit:
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);

	return ret;
}
static int tce_iommu_attach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	int ret;
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp_iter, *tcegrp = NULL;

	mutex_lock(&container->lock);

	/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
			iommu_group_id(iommu_group), iommu_group); */
	table_group = iommu_group_get_iommudata(iommu_group);
	if (!table_group) {
		ret = -ENODEV;
		goto unlock_exit;
	}

	if (tce_groups_attached(container) && (!table_group->ops ||
			!table_group->ops->take_ownership ||
			!table_group->ops->release_ownership)) {
		ret = -EBUSY;
		goto unlock_exit;
	}

	/*
	 * Check if the new group has the same iommu_ops (i.e. compatible).
	 * Iterate with a dedicated variable so that the unlock_exit path
	 * never kfree()s a live list entry, only the allocation below.
	 */
	list_for_each_entry(tcegrp_iter, &container->group_list, next) {
		struct iommu_table_group *table_group_tmp;

		if (tcegrp_iter->grp == iommu_group) {
			pr_warn("tce_vfio: Group %d is already attached\n",
					iommu_group_id(iommu_group));
			ret = -EBUSY;
			goto unlock_exit;
		}
		table_group_tmp = iommu_group_get_iommudata(tcegrp_iter->grp);
		if (table_group_tmp->ops->create_table !=
				table_group->ops->create_table) {
			pr_warn("tce_vfio: Group %d is incompatible with group %d\n",
					iommu_group_id(iommu_group),
					iommu_group_id(tcegrp_iter->grp));
			ret = -EPERM;
			goto unlock_exit;
		}
	}

	tcegrp = kzalloc(sizeof(*tcegrp), GFP_KERNEL);
	if (!tcegrp) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	if (!table_group->ops || !table_group->ops->take_ownership ||
			!table_group->ops->release_ownership) {
		if (container->v2) {
			ret = -EPERM;
			goto unlock_exit;
		}
		ret = tce_iommu_take_ownership(container, table_group);
	} else {
		if (!container->v2) {
			ret = -EPERM;
			goto unlock_exit;
		}
		ret = tce_iommu_take_ownership_ddw(container, table_group);
		if (!tce_groups_attached(container) && !container->tables[0])
			container->def_window_pending = true;
	}

	if (!ret) {
		tcegrp->grp = iommu_group;
		list_add(&tcegrp->next, &container->group_list);
	}

unlock_exit:
	if (ret && tcegrp)
		kfree(tcegrp);

	mutex_unlock(&container->lock);

	return ret;
}

static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	bool found = false;
	struct tce_iommu_group *tcegrp;

	mutex_lock(&container->lock);

	list_for_each_entry(tcegrp, &container->group_list, next) {
		if (tcegrp->grp == iommu_group) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_warn("tce_vfio: detaching unattached group #%u\n",
				iommu_group_id(iommu_group));
		goto unlock_exit;
	}

	list_del(&tcegrp->next);
	kfree(tcegrp);

	table_group = iommu_group_get_iommudata(iommu_group);
	BUG_ON(!table_group);

	if (!table_group->ops || !table_group->ops->release_ownership)
		tce_iommu_release_ownership(container, table_group);
	else
		tce_iommu_release_ownership_ddw(container, table_group);

unlock_exit:
	mutex_unlock(&container->lock);
}

const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
	.name		= "iommu-vfio-powerpc",
	.owner		= THIS_MODULE,
	.open		= tce_iommu_open,
	.release	= tce_iommu_release,
	.ioctl		= tce_iommu_ioctl,
	.attach_group	= tce_iommu_attach_group,
	.detach_group	= tce_iommu_detach_group,
};

static int __init tce_iommu_init(void)
{
	return vfio_register_iommu_driver(&tce_iommu_driver_ops);
}

static void __exit tce_iommu_cleanup(void)
{
	vfio_unregister_iommu_driver(&tce_iommu_driver_ops);
}

module_init(tce_iommu_init);
module_exit(tce_iommu_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);