/*
 * VFIO: IOMMU DMA mapping support for TCE on POWER
 *
 * Copyright (C) 2013 IBM Corp.  All rights reserved.
 *     Author: Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio_iommu_type1.c:
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/vfio.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>

#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

#define DRIVER_VERSION  "0.1"
#define DRIVER_AUTHOR   "aik@ozlabs.ru"
#define DRIVER_DESC     "VFIO IOMMU SPAPR TCE"

static void tce_iommu_detach_group(void *iommu_data,
                struct iommu_group *iommu_group);

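/*
 * Account @npages against the mm's RLIMIT_MEMLOCK under mmap_sem.
 * Fails with -ENOMEM if the new total would exceed the limit and the
 * task lacks CAP_IPC_LOCK; callers undo the accounting with
 * decrement_locked_vm() below.
 */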
static long try_increment_locked_vm(struct mm_struct *mm, long npages)
{
        long ret = 0, locked, lock_limit;

        if (WARN_ON_ONCE(!mm))
                return -EPERM;

        if (!npages)
                return 0;

        down_write(&mm->mmap_sem);
        locked = mm->locked_vm + npages;
        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                ret = -ENOMEM;
        else
                mm->locked_vm += npages;

        pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
                        npages << PAGE_SHIFT,
                        mm->locked_vm << PAGE_SHIFT,
                        rlimit(RLIMIT_MEMLOCK),
                        ret ? " - exceeded" : "");

        up_write(&mm->mmap_sem);

        return ret;
}

static void decrement_locked_vm(struct mm_struct *mm, long npages)
{
        if (!mm || !npages)
                return;

        down_write(&mm->mmap_sem);
        if (WARN_ON_ONCE(npages > mm->locked_vm))
                npages = mm->locked_vm;
        mm->locked_vm -= npages;
        pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
                        npages << PAGE_SHIFT,
                        mm->locked_vm << PAGE_SHIFT,
                        rlimit(RLIMIT_MEMLOCK));
        up_write(&mm->mmap_sem);
}

/*
 * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
 *
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */

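/*
 * A minimal sketch of how userspace typically drives this driver through
 * the standard VFIO container ioctls (v1 flow; error handling omitted,
 * and group_fd, buf, iova and size are caller-supplied placeholders):
 *
 *      int container = open("/dev/vfio/vfio", O_RDWR);
 *      ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container);
 *      ioctl(container, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
 *      ioctl(container, VFIO_IOMMU_ENABLE);
 *
 *      struct vfio_iommu_type1_dma_map map = {
 *              .argsz = sizeof(map),
 *              .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *              .vaddr = (__u64)(uintptr_t)buf,
 *              .iova = iova,
 *              .size = size,
 *      };
 *      ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
 */
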
struct tce_iommu_group {
        struct list_head next;
        struct iommu_group *grp;
};

/*
 * A container needs to remember which preregistered regions it has
 * referenced so it can do proper cleanup when the userspace process exits.
 */
struct tce_iommu_prereg {
        struct list_head next;
        struct mm_iommu_table_group_mem_t *mem;
};

/*
 * The container descriptor supports only a single group per container.
 * Required by the API as the container is not supplied with the IOMMU group
 * at the moment of initialization.
 */
struct tce_container {
        struct mutex lock;
        bool enabled;
        bool v2;
        bool def_window_pending;
        unsigned long locked_pages;
        struct mm_struct *mm;
        struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
        struct list_head group_list;
        struct list_head prereg_list;
};

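/*
 * Bind the container to the caller's mm on first use and take a reference
 * on it (dropped with mmdrop() at release time); later calls from a
 * different mm fail with -EPERM so one process cannot manipulate another
 * process's mappings through a shared container fd.
 */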
static long tce_iommu_mm_set(struct tce_container *container)
{
        if (container->mm) {
                if (container->mm == current->mm)
                        return 0;
                return -EPERM;
        }
        BUG_ON(!current->mm);
        container->mm = current->mm;
        atomic_inc(&container->mm->mm_count);

        return 0;
}

static long tce_iommu_prereg_free(struct tce_container *container,
                struct tce_iommu_prereg *tcemem)
{
        long ret;

        ret = mm_iommu_put(container->mm, tcemem->mem);
        if (ret)
                return ret;

        list_del(&tcemem->next);
        kfree(tcemem);

        return 0;
}

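/*
 * Drop the container's reference to a preregistered region; @vaddr and
 * @size must be page aligned and correspond to a region registered
 * through this container.
 */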
static long tce_iommu_unregister_pages(struct tce_container *container,
                __u64 vaddr, __u64 size)
{
        struct mm_iommu_table_group_mem_t *mem;
        struct tce_iommu_prereg *tcemem;
        bool found = false;

        if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
                return -EINVAL;

        mem = mm_iommu_find(container->mm, vaddr, size >> PAGE_SHIFT);
        if (!mem)
                return -ENOENT;

        list_for_each_entry(tcemem, &container->prereg_list, next) {
                if (tcemem->mem == mem) {
                        found = true;
                        break;
                }
        }

        if (!found)
                return -ENOENT;

        return tce_iommu_prereg_free(container, tcemem);
}

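/*
 * Preregister (pin) a userspace memory region for the v2 IOMMU so that
 * later map/unmap operations only translate already-pinned pages;
 * registering the same region twice through one container is -EBUSY.
 */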
static long tce_iommu_register_pages(struct tce_container *container,
                __u64 vaddr, __u64 size)
{
        long ret = 0;
        struct mm_iommu_table_group_mem_t *mem = NULL;
        struct tce_iommu_prereg *tcemem;
        unsigned long entries = size >> PAGE_SHIFT;

        if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
                        ((vaddr + size) < vaddr))
                return -EINVAL;

        mem = mm_iommu_find(container->mm, vaddr, entries);
        if (mem) {
                list_for_each_entry(tcemem, &container->prereg_list, next) {
                        if (tcemem->mem == mem)
                                return -EBUSY;
                }
        }

        ret = mm_iommu_get(container->mm, vaddr, entries, &mem);
        if (ret)
                return ret;

        tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
        if (!tcemem) {
                mm_iommu_put(container->mm, mem);
                return -ENOMEM;
        }

        tcemem->mem = mem;
        list_add(&tcemem->next, &container->prereg_list);

        container->enabled = true;

        return 0;
}

static bool tce_page_is_contained(struct page *page, unsigned page_shift)
{
        /*
         * Check that the TCE table granularity is not bigger than the size of
         * a page we just found. Otherwise the hardware can get access to
         * a bigger memory chunk than it should.
         */
        return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift;
}

static inline bool tce_groups_attached(struct tce_container *container)
{
        return !list_empty(&container->group_list);
}

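/*
 * Find the container table whose DMA window contains @ioba; returns the
 * table index and fills @ptbl on success, or -1 if no window matches.
 */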
static long tce_iommu_find_table(struct tce_container *container,
                phys_addr_t ioba, struct iommu_table **ptbl)
{
        long i;

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                struct iommu_table *tbl = container->tables[i];

                if (tbl) {
                        unsigned long entry = ioba >> tbl->it_page_shift;
                        unsigned long start = tbl->it_offset;
                        unsigned long end = start + tbl->it_size;

                        if ((start <= entry) && (entry < end)) {
                                *ptbl = tbl;
                                return i;
                        }
                }
        }

        return -1;
}

static int tce_iommu_find_free_table(struct tce_container *container)
{
        int i;

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                if (!container->tables[i])
                        return i;
        }

        return -ENOSPC;
}

static int tce_iommu_enable(struct tce_container *container)
{
        int ret = 0;
        unsigned long locked;
        struct iommu_table_group *table_group;
        struct tce_iommu_group *tcegrp;

        if (container->enabled)
                return -EBUSY;

        /*
         * When userspace pages are mapped into the IOMMU, they are effectively
         * locked memory, so, theoretically, we need to update the accounting
         * of locked pages on each map and unmap.  For powerpc, the map/unmap
         * paths can be very hot, though, and the accounting would kill
         * performance, especially since it would be difficult or impossible
         * to handle the accounting in real mode only.
         *
         * To address that, rather than precisely accounting every page, we
         * instead account for a worst case on locked memory when the iommu is
         * enabled and disabled.  The worst case upper bound on locked memory
         * is the size of the whole iommu window, which is usually relatively
         * small (compared to total memory sizes) on POWER hardware.
         *
         * Also, we don't have a nice way to fail on H_PUT_TCE due to ulimits;
         * that would effectively kill the guest at random points, so it is
         * much better to enforce the limit based on the maximum that the
         * guest can map.
         *
         * Unfortunately at the moment it counts whole tables, no matter how
         * much memory the guest has. I.e. for a 4GB guest with 4 IOMMU groups,
         * each with a 2GB DMA window, 8GB will be counted here. The reason for
         * this is that we cannot tell here the amount of RAM used by the guest
         * as this information is only available from KVM and VFIO is
         * KVM agnostic.
         *
         * So we do not allow enabling a container without a group attached
         * as there is no way to know how much we should increment
         * the locked_vm counter.
         */
        if (!tce_groups_attached(container))
                return -ENODEV;

        tcegrp = list_first_entry(&container->group_list,
                        struct tce_iommu_group, next);
        table_group = iommu_group_get_iommudata(tcegrp->grp);
        if (!table_group)
                return -ENODEV;

        if (!table_group->tce32_size)
                return -EPERM;

        ret = tce_iommu_mm_set(container);
        if (ret)
                return ret;

        locked = table_group->tce32_size >> PAGE_SHIFT;
        ret = try_increment_locked_vm(container->mm, locked);
        if (ret)
                return ret;

        container->locked_pages = locked;

        container->enabled = true;

        return ret;
}


static void tce_iommu_disable(struct tce_container *container)
{
        if (!container->enabled)
                return;

        container->enabled = false;

        BUG_ON(!container->mm);
        decrement_locked_vm(container->mm, container->locked_pages);
}

static void *tce_iommu_open(unsigned long arg)
{
        struct tce_container *container;

        if ((arg != VFIO_SPAPR_TCE_IOMMU) && (arg != VFIO_SPAPR_TCE_v2_IOMMU)) {
                pr_err("tce_vfio: Wrong IOMMU type\n");
                return ERR_PTR(-EINVAL);
        }

        container = kzalloc(sizeof(*container), GFP_KERNEL);
        if (!container)
                return ERR_PTR(-ENOMEM);

        mutex_init(&container->lock);
        INIT_LIST_HEAD_RCU(&container->group_list);
        INIT_LIST_HEAD_RCU(&container->prereg_list);

        container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;

        return container;
}

static int tce_iommu_clear(struct tce_container *container,
                struct iommu_table *tbl,
                unsigned long entry, unsigned long pages);
static void tce_iommu_free_table(struct tce_container *container,
                struct iommu_table *tbl);

static void tce_iommu_release(void *iommu_data)
{
        struct tce_container *container = iommu_data;
        struct tce_iommu_group *tcegrp;
        struct tce_iommu_prereg *tcemem, *tmtmp;
        long i;

        while (tce_groups_attached(container)) {
                tcegrp = list_first_entry(&container->group_list,
                                struct tce_iommu_group, next);
                tce_iommu_detach_group(iommu_data, tcegrp->grp);
        }

        /*
         * If VFIO created a table, it was not disposed of by
         * tce_iommu_detach_group(), so do it now.
         */
        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                struct iommu_table *tbl = container->tables[i];

                if (!tbl)
                        continue;

                tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
                tce_iommu_free_table(container, tbl);
        }

        list_for_each_entry_safe(tcemem, tmtmp, &container->prereg_list, next)
                WARN_ON(tce_iommu_prereg_free(container, tcemem));

        tce_iommu_disable(container);
        if (container->mm)
                mmdrop(container->mm);
        mutex_destroy(&container->lock);

        kfree(container);
}

static void tce_iommu_unuse_page(struct tce_container *container,
                unsigned long hpa)
{
        struct page *page;

        page = pfn_to_page(hpa >> PAGE_SHIFT);
        put_page(page);
}

static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
                unsigned long tce, unsigned long shift,
                unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
{
        long ret = 0;
        struct mm_iommu_table_group_mem_t *mem;

        mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift);
        if (!mem)
                return -EINVAL;

        ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa);
        if (ret)
                return -EINVAL;

        *pmem = mem;

        return 0;
}

static void tce_iommu_unuse_page_v2(struct tce_container *container,
                struct iommu_table *tbl, unsigned long entry)
{
        struct mm_iommu_table_group_mem_t *mem = NULL;
        int ret;
        unsigned long hpa = 0;
        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

        if (!pua)
                return;

        ret = tce_iommu_prereg_ua_to_hpa(container, be64_to_cpu(*pua),
                        tbl->it_page_shift, &hpa, &mem);
        if (ret)
                pr_debug("%s: tce %llx at #%lx was not cached, ret=%d\n",
                                __func__, be64_to_cpu(*pua), entry, ret);
        if (mem)
                mm_iommu_mapped_dec(mem);

        *pua = cpu_to_be64(0);
}

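/*
 * Clear @pages TCE entries starting at @entry: swap each valid entry for
 * an empty one and drop the reference this container held on the backing
 * page (the v2 path also releases the preregistered-memory mapping).
 */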
static int tce_iommu_clear(struct tce_container *container,
                struct iommu_table *tbl,
                unsigned long entry, unsigned long pages)
{
        unsigned long oldhpa;
        long ret;
        enum dma_data_direction direction;

        for ( ; pages; --pages, ++entry) {
                cond_resched();

                direction = DMA_NONE;
                oldhpa = 0;
                ret = iommu_tce_xchg(tbl, entry, &oldhpa, &direction);
                if (ret)
                        continue;

                if (direction == DMA_NONE)
                        continue;

                if (container->v2) {
                        tce_iommu_unuse_page_v2(container, tbl, entry);
                        continue;
                }

                tce_iommu_unuse_page(container, oldhpa);
        }

        return 0;
}

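/*
 * Pin the userspace page backing @tce with get_user_pages_fast() and
 * return its host physical address; the pin is dropped later via
 * tce_iommu_unuse_page().
 */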
static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
{
        struct page *page = NULL;
        enum dma_data_direction direction = iommu_tce_direction(tce);

        if (get_user_pages_fast(tce & PAGE_MASK, 1,
                        direction != DMA_TO_DEVICE, &page) != 1)
                return -EFAULT;

        *hpa = __pa((unsigned long) page_address(page));

        return 0;
}

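/*
 * v1 map path: pin each userspace page and program @pages consecutive
 * TCEs starting at @entry; on any failure the entries built so far are
 * cleared so the operation is all-or-nothing.
 */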
static long tce_iommu_build(struct tce_container *container,
                struct iommu_table *tbl,
                unsigned long entry, unsigned long tce, unsigned long pages,
                enum dma_data_direction direction)
{
        long i, ret = 0;
        struct page *page;
        unsigned long hpa;
        enum dma_data_direction dirtmp;

        for (i = 0; i < pages; ++i) {
                unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;

                ret = tce_iommu_use_page(tce, &hpa);
                if (ret)
                        break;

                page = pfn_to_page(hpa >> PAGE_SHIFT);
                if (!tce_page_is_contained(page, tbl->it_page_shift)) {
                        ret = -EPERM;
                        break;
                }

                hpa |= offset;
                dirtmp = direction;
                ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
                if (ret) {
                        tce_iommu_unuse_page(container, hpa);
                        pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
                                        __func__, entry << tbl->it_page_shift,
                                        tce, ret);
                        break;
                }

                if (dirtmp != DMA_NONE)
                        tce_iommu_unuse_page(container, hpa);

                tce += IOMMU_PAGE_SIZE(tbl);
        }

        if (ret)
                tce_iommu_clear(container, tbl, entry, i);

        return ret;
}

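/*
 * v2 map path: translate each userspace address through the preregistered
 * memory list (no page pinning here), remember the userspace address in
 * the table's userspace view, and roll back on failure as in the v1 path.
 */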
static long tce_iommu_build_v2(struct tce_container *container,
                struct iommu_table *tbl,
                unsigned long entry, unsigned long tce, unsigned long pages,
                enum dma_data_direction direction)
{
        long i, ret = 0;
        struct page *page;
        unsigned long hpa;
        enum dma_data_direction dirtmp;

        for (i = 0; i < pages; ++i) {
                struct mm_iommu_table_group_mem_t *mem = NULL;
                __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry + i);

                ret = tce_iommu_prereg_ua_to_hpa(container,
                                tce, tbl->it_page_shift, &hpa, &mem);
                if (ret)
                        break;

                page = pfn_to_page(hpa >> PAGE_SHIFT);
                if (!tce_page_is_contained(page, tbl->it_page_shift)) {
                        ret = -EPERM;
                        break;
                }

                /* Preserve offset within IOMMU page */
                hpa |= tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
                dirtmp = direction;

                /* The registered region is being unregistered */
                if (mm_iommu_mapped_inc(mem))
                        break;

                ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
                if (ret) {
                        /* dirtmp cannot be DMA_NONE here */
                        tce_iommu_unuse_page_v2(container, tbl, entry + i);
                        pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
                                        __func__, entry << tbl->it_page_shift,
                                        tce, ret);
                        break;
                }

                if (dirtmp != DMA_NONE)
                        tce_iommu_unuse_page_v2(container, tbl, entry + i);

                *pua = cpu_to_be64(tce);

                tce += IOMMU_PAGE_SIZE(tbl);
        }

        if (ret)
                tce_iommu_clear(container, tbl, entry, i);

        return ret;
}

static long tce_iommu_create_table(struct tce_container *container,
                        struct iommu_table_group *table_group,
                        int num,
                        __u32 page_shift,
                        __u64 window_size,
                        __u32 levels,
                        struct iommu_table **ptbl)
{
        long ret, table_size;

        table_size = table_group->ops->get_table_size(page_shift, window_size,
                        levels);
        if (!table_size)
                return -EINVAL;

        ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT);
        if (ret)
                return ret;

        ret = table_group->ops->create_table(table_group, num,
                        page_shift, window_size, levels, ptbl);

        WARN_ON(!ret && !(*ptbl)->it_ops->free);
        WARN_ON(!ret && ((*ptbl)->it_allocated_size > table_size));

        return ret;
}

static void tce_iommu_free_table(struct tce_container *container,
                struct iommu_table *tbl)
{
        unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;

        iommu_tce_table_put(tbl);
        decrement_locked_vm(container->mm, pages);
}

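/*
 * Create a DMA window (a TCE table) in the first free table slot and
 * program it into every attached group; the platform-assigned window
 * start address is returned via @start_addr.
 */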
static long tce_iommu_create_window(struct tce_container *container,
                __u32 page_shift, __u64 window_size, __u32 levels,
                __u64 *start_addr)
{
        struct tce_iommu_group *tcegrp;
        struct iommu_table_group *table_group;
        struct iommu_table *tbl = NULL;
        long ret, num;

        num = tce_iommu_find_free_table(container);
        if (num < 0)
                return num;

        /* Get the first group for ops::create_table */
        tcegrp = list_first_entry(&container->group_list,
                        struct tce_iommu_group, next);
        table_group = iommu_group_get_iommudata(tcegrp->grp);
        if (!table_group)
                return -EFAULT;

        if (!(table_group->pgsizes & (1ULL << page_shift)))
                return -EINVAL;

        if (!table_group->ops->set_window || !table_group->ops->unset_window ||
                        !table_group->ops->get_table_size ||
                        !table_group->ops->create_table)
                return -EPERM;

        /* Create TCE table */
        ret = tce_iommu_create_table(container, table_group, num,
                        page_shift, window_size, levels, &tbl);
        if (ret)
                return ret;

        BUG_ON(!tbl->it_ops->free);

        /*
         * Program the table to every group.
         * Groups were tested for compatibility at attach time.
         */
        list_for_each_entry(tcegrp, &container->group_list, next) {
                table_group = iommu_group_get_iommudata(tcegrp->grp);

                ret = table_group->ops->set_window(table_group, num, tbl);
                if (ret)
                        goto unset_exit;
        }

        container->tables[num] = tbl;

        /* Return start address assigned by platform in create_table() */
        *start_addr = tbl->it_offset << tbl->it_page_shift;

        return 0;

unset_exit:
        list_for_each_entry(tcegrp, &container->group_list, next) {
                table_group = iommu_group_get_iommudata(tcegrp->grp);
                table_group->ops->unset_window(table_group, num);
        }
        tce_iommu_free_table(container, tbl);

        return ret;
}

static long tce_iommu_remove_window(struct tce_container *container,
                __u64 start_addr)
{
        struct iommu_table_group *table_group = NULL;
        struct iommu_table *tbl;
        struct tce_iommu_group *tcegrp;
        int num;

        num = tce_iommu_find_table(container, start_addr, &tbl);
        if (num < 0)
                return -EINVAL;

        BUG_ON(!tbl->it_size);

        /* Detach groups from IOMMUs */
        list_for_each_entry(tcegrp, &container->group_list, next) {
                table_group = iommu_group_get_iommudata(tcegrp->grp);

                /*
                 * SPAPR TCE IOMMU exposes the default DMA window to
                 * the guest via dma32_window_start/size of
                 * VFIO_IOMMU_SPAPR_TCE_GET_INFO. Some platforms allow
                 * the userspace to remove this window, some do not, so
                 * here we check for the platform capability.
                 */
                if (!table_group->ops || !table_group->ops->unset_window)
                        return -EPERM;

                table_group->ops->unset_window(table_group, num);
        }

        /* Free table */
        tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
        tce_iommu_free_table(container, tbl);
        container->tables[num] = NULL;

        return 0;
}

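/*
 * Lazily create the default 32-bit DMA window: def_window_pending is set
 * when a DDW-capable group is attached, and the window is only created
 * here on the first map/unmap or explicit window creation, which lets
 * userspace remove the default window without it ever being allocated.
 */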
static long tce_iommu_create_default_window(struct tce_container *container)
{
        long ret;
        __u64 start_addr = 0;
        struct tce_iommu_group *tcegrp;
        struct iommu_table_group *table_group;

        if (!container->def_window_pending)
                return 0;

        if (!tce_groups_attached(container))
                return -ENODEV;

        tcegrp = list_first_entry(&container->group_list,
                        struct tce_iommu_group, next);
        table_group = iommu_group_get_iommudata(tcegrp->grp);
        if (!table_group)
                return -ENODEV;

        ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
                        table_group->tce32_size, 1, &start_addr);
        WARN_ON_ONCE(!ret && start_addr);

        if (!ret)
                container->def_window_pending = false;

        return ret;
}

static long tce_iommu_ioctl(void *iommu_data,
                                 unsigned int cmd, unsigned long arg)
{
        struct tce_container *container = iommu_data;
        unsigned long minsz, ddwsz;
        long ret;

        switch (cmd) {
        case VFIO_CHECK_EXTENSION:
                switch (arg) {
                case VFIO_SPAPR_TCE_IOMMU:
                case VFIO_SPAPR_TCE_v2_IOMMU:
                        ret = 1;
                        break;
                default:
                        ret = vfio_spapr_iommu_eeh_ioctl(NULL, cmd, arg);
                        break;
                }

                return (ret < 0) ? 0 : ret;
        }

        /*
         * Sanity check to prevent one userspace process from manipulating
         * another process's mm.
         */
        BUG_ON(!container);
        if (container->mm && container->mm != current->mm)
                return -EPERM;

        switch (cmd) {
        case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
                struct vfio_iommu_spapr_tce_info info;
                struct tce_iommu_group *tcegrp;
                struct iommu_table_group *table_group;

                if (!tce_groups_attached(container))
                        return -ENXIO;

                tcegrp = list_first_entry(&container->group_list,
                                struct tce_iommu_group, next);
                table_group = iommu_group_get_iommudata(tcegrp->grp);

                if (!table_group)
                        return -ENXIO;

                minsz = offsetofend(struct vfio_iommu_spapr_tce_info,
                                dma32_window_size);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                info.dma32_window_start = table_group->tce32_start;
                info.dma32_window_size = table_group->tce32_size;
                info.flags = 0;
                memset(&info.ddw, 0, sizeof(info.ddw));

                if (table_group->max_dynamic_windows_supported &&
                                container->v2) {
                        info.flags |= VFIO_IOMMU_SPAPR_INFO_DDW;
                        info.ddw.pgsizes = table_group->pgsizes;
                        info.ddw.max_dynamic_windows_supported =
                                table_group->max_dynamic_windows_supported;
                        info.ddw.levels = table_group->max_levels;
                }

                ddwsz = offsetofend(struct vfio_iommu_spapr_tce_info, ddw);

                if (info.argsz >= ddwsz)
                        minsz = ddwsz;

                if (copy_to_user((void __user *)arg, &info, minsz))
                        return -EFAULT;

                return 0;
        }
        case VFIO_IOMMU_MAP_DMA: {
                struct vfio_iommu_type1_dma_map param;
                struct iommu_table *tbl = NULL;
                long num;
                enum dma_data_direction direction;

                if (!container->enabled)
                        return -EPERM;

                minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

                if (copy_from_user(&param, (void __user *)arg, minsz))
                        return -EFAULT;

                if (param.argsz < minsz)
                        return -EINVAL;

                if (param.flags & ~(VFIO_DMA_MAP_FLAG_READ |
                                VFIO_DMA_MAP_FLAG_WRITE))
                        return -EINVAL;

                ret = tce_iommu_create_default_window(container);
                if (ret)
                        return ret;

                num = tce_iommu_find_table(container, param.iova, &tbl);
                if (num < 0)
                        return -ENXIO;

                if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
                                (param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
                        return -EINVAL;

                /* iova is checked by the IOMMU API */
                if (param.flags & VFIO_DMA_MAP_FLAG_READ) {
                        if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
                                direction = DMA_BIDIRECTIONAL;
                        else
                                direction = DMA_TO_DEVICE;
                } else {
                        if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
                                direction = DMA_FROM_DEVICE;
                        else
                                return -EINVAL;
                }

                ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr);
                if (ret)
                        return ret;

                if (container->v2)
                        ret = tce_iommu_build_v2(container, tbl,
                                        param.iova >> tbl->it_page_shift,
                                        param.vaddr,
                                        param.size >> tbl->it_page_shift,
                                        direction);
                else
                        ret = tce_iommu_build(container, tbl,
                                        param.iova >> tbl->it_page_shift,
                                        param.vaddr,
                                        param.size >> tbl->it_page_shift,
                                        direction);

                iommu_flush_tce(tbl);

                return ret;
        }
        case VFIO_IOMMU_UNMAP_DMA: {
                struct vfio_iommu_type1_dma_unmap param;
                struct iommu_table *tbl = NULL;
                long num;

                if (!container->enabled)
                        return -EPERM;

                minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,
                                size);

                if (copy_from_user(&param, (void __user *)arg, minsz))
                        return -EFAULT;

                if (param.argsz < minsz)
                        return -EINVAL;

                /* No flags are supported now */
                if (param.flags)
                        return -EINVAL;

                ret = tce_iommu_create_default_window(container);
                if (ret)
                        return ret;

                num = tce_iommu_find_table(container, param.iova, &tbl);
                if (num < 0)
                        return -ENXIO;

                if (param.size & ~IOMMU_PAGE_MASK(tbl))
                        return -EINVAL;

                ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
                                param.size >> tbl->it_page_shift);
                if (ret)
                        return ret;

                ret = tce_iommu_clear(container, tbl,
                                param.iova >> tbl->it_page_shift,
                                param.size >> tbl->it_page_shift);
                iommu_flush_tce(tbl);

                return ret;
        }
        case VFIO_IOMMU_SPAPR_REGISTER_MEMORY: {
                struct vfio_iommu_spapr_register_memory param;

                if (!container->v2)
                        break;

                minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
                                size);

                ret = tce_iommu_mm_set(container);
                if (ret)
                        return ret;

                if (copy_from_user(&param, (void __user *)arg, minsz))
                        return -EFAULT;

                if (param.argsz < minsz)
                        return -EINVAL;

                /* No flags are supported now */
                if (param.flags)
                        return -EINVAL;

                mutex_lock(&container->lock);
                ret = tce_iommu_register_pages(container, param.vaddr,
                                param.size);
                mutex_unlock(&container->lock);

                return ret;
        }
        case VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY: {
                struct vfio_iommu_spapr_register_memory param;

                if (!container->v2)
                        break;

                if (!container->mm)
                        return -EPERM;

                minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
                                size);

                if (copy_from_user(&param, (void __user *)arg, minsz))
                        return -EFAULT;

                if (param.argsz < minsz)
                        return -EINVAL;

                /* No flags are supported now */
                if (param.flags)
                        return -EINVAL;

                mutex_lock(&container->lock);
                ret = tce_iommu_unregister_pages(container, param.vaddr,
                                param.size);
                mutex_unlock(&container->lock);

                return ret;
        }
        case VFIO_IOMMU_ENABLE:
                if (container->v2)
                        break;

                mutex_lock(&container->lock);
                ret = tce_iommu_enable(container);
                mutex_unlock(&container->lock);
                return ret;

        case VFIO_IOMMU_DISABLE:
                if (container->v2)
                        break;

                mutex_lock(&container->lock);
                tce_iommu_disable(container);
                mutex_unlock(&container->lock);
                return 0;

        case VFIO_EEH_PE_OP: {
                struct tce_iommu_group *tcegrp;

                ret = 0;
                list_for_each_entry(tcegrp, &container->group_list, next) {
                        ret = vfio_spapr_iommu_eeh_ioctl(tcegrp->grp,
                                        cmd, arg);
                        if (ret)
                                return ret;
                }
                return ret;
        }

        case VFIO_IOMMU_SPAPR_TCE_CREATE: {
                struct vfio_iommu_spapr_tce_create create;

                if (!container->v2)
                        break;

                ret = tce_iommu_mm_set(container);
                if (ret)
                        return ret;

                if (!tce_groups_attached(container))
                        return -ENXIO;

                minsz = offsetofend(struct vfio_iommu_spapr_tce_create,
                                start_addr);

                if (copy_from_user(&create, (void __user *)arg, minsz))
                        return -EFAULT;

                if (create.argsz < minsz)
                        return -EINVAL;

                if (create.flags)
                        return -EINVAL;

                mutex_lock(&container->lock);

                ret = tce_iommu_create_default_window(container);
                if (!ret)
                        ret = tce_iommu_create_window(container,
                                        create.page_shift,
                                        create.window_size, create.levels,
                                        &create.start_addr);

                mutex_unlock(&container->lock);

                if (!ret && copy_to_user((void __user *)arg, &create, minsz))
                        ret = -EFAULT;

                return ret;
        }
        case VFIO_IOMMU_SPAPR_TCE_REMOVE: {
                struct vfio_iommu_spapr_tce_remove remove;

                if (!container->v2)
                        break;

                ret = tce_iommu_mm_set(container);
                if (ret)
                        return ret;

                if (!tce_groups_attached(container))
                        return -ENXIO;

                minsz = offsetofend(struct vfio_iommu_spapr_tce_remove,
                                start_addr);

                if (copy_from_user(&remove, (void __user *)arg, minsz))
                        return -EFAULT;

                if (remove.argsz < minsz)
                        return -EINVAL;

                if (remove.flags)
                        return -EINVAL;

                if (container->def_window_pending && !remove.start_addr) {
                        container->def_window_pending = false;
                        return 0;
                }

                mutex_lock(&container->lock);

                ret = tce_iommu_remove_window(container, remove.start_addr);

                mutex_unlock(&container->lock);

                return ret;
        }
        }

        return -ENOTTY;
}

static void tce_iommu_release_ownership(struct tce_container *container,
                struct iommu_table_group *table_group)
{
        int i;

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                struct iommu_table *tbl = container->tables[i];

                if (!tbl)
                        continue;

                tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
                if (tbl->it_map)
                        iommu_release_ownership(tbl);

                container->tables[i] = NULL;
        }
}

static int tce_iommu_take_ownership(struct tce_container *container,
                struct iommu_table_group *table_group)
{
        int i, j, rc = 0;

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                struct iommu_table *tbl = table_group->tables[i];

                if (!tbl || !tbl->it_map)
                        continue;

                rc = iommu_take_ownership(tbl);
                if (rc) {
                        for (j = 0; j < i; ++j)
                                iommu_release_ownership(
                                                table_group->tables[j]);

                        return rc;
                }
        }

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
                container->tables[i] = table_group->tables[i];

        return 0;
}

static void tce_iommu_release_ownership_ddw(struct tce_container *container,
                struct iommu_table_group *table_group)
{
        long i;

        if (!table_group->ops->unset_window) {
                WARN_ON_ONCE(1);
                return;
        }

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
                table_group->ops->unset_window(table_group, i);

        table_group->ops->release_ownership(table_group);
}

static long tce_iommu_take_ownership_ddw(struct tce_container *container,
                struct iommu_table_group *table_group)
{
        long i, ret = 0;

        if (!table_group->ops->create_table || !table_group->ops->set_window ||
                        !table_group->ops->release_ownership) {
                WARN_ON_ONCE(1);
                return -EFAULT;
        }

        table_group->ops->take_ownership(table_group);

        /* Set all windows to the new group */
        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                struct iommu_table *tbl = container->tables[i];

                if (!tbl)
                        continue;

                ret = table_group->ops->set_window(table_group, i, tbl);
                if (ret)
                        goto release_exit;
        }

        return 0;

release_exit:
        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
                table_group->ops->unset_window(table_group, i);

        table_group->ops->release_ownership(table_group);

        return ret;
}

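/*
 * Attach an IOMMU group to the container: either take direct ownership of
 * the group's preallocated tables (v1, non-DDW platforms) or take DDW
 * ownership and replay the container's windows into the new group (v2).
 * All attached groups must share compatible table_group ops.
 */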
static int tce_iommu_attach_group(void *iommu_data,
                struct iommu_group *iommu_group)
{
        int ret;
        struct tce_container *container = iommu_data;
        struct iommu_table_group *table_group;
        struct tce_iommu_group *tcegrp = NULL;

        mutex_lock(&container->lock);

        /* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
                        iommu_group_id(iommu_group), iommu_group); */
        table_group = iommu_group_get_iommudata(iommu_group);
        if (!table_group) {
                ret = -ENODEV;
                goto unlock_exit;
        }

        if (tce_groups_attached(container) && (!table_group->ops ||
                        !table_group->ops->take_ownership ||
                        !table_group->ops->release_ownership)) {
                ret = -EBUSY;
                goto unlock_exit;
        }

        /* Check if the new group has the same iommu_ops (i.e. is compatible) */
        list_for_each_entry(tcegrp, &container->group_list, next) {
                struct iommu_table_group *table_group_tmp;

                if (tcegrp->grp == iommu_group) {
                        pr_warn("tce_vfio: Group %d is already attached\n",
                                        iommu_group_id(iommu_group));
                        ret = -EBUSY;
                        goto unlock_exit;
                }
                table_group_tmp = iommu_group_get_iommudata(tcegrp->grp);
                if (table_group_tmp->ops->create_table !=
                                table_group->ops->create_table) {
                        pr_warn("tce_vfio: Group %d is incompatible with group %d\n",
                                        iommu_group_id(iommu_group),
                                        iommu_group_id(tcegrp->grp));
                        ret = -EPERM;
                        goto unlock_exit;
                }
        }

        tcegrp = kzalloc(sizeof(*tcegrp), GFP_KERNEL);
        if (!tcegrp) {
                ret = -ENOMEM;
                goto unlock_exit;
        }

        if (!table_group->ops || !table_group->ops->take_ownership ||
                        !table_group->ops->release_ownership) {
                if (container->v2) {
                        ret = -EPERM;
                        goto unlock_exit;
                }
                ret = tce_iommu_take_ownership(container, table_group);
        } else {
                if (!container->v2) {
                        ret = -EPERM;
                        goto unlock_exit;
                }
                ret = tce_iommu_take_ownership_ddw(container, table_group);
                if (!tce_groups_attached(container) && !container->tables[0])
                        container->def_window_pending = true;
        }

        if (!ret) {
                tcegrp->grp = iommu_group;
                list_add(&tcegrp->next, &container->group_list);
        }

unlock_exit:
        if (ret && tcegrp)
                kfree(tcegrp);

        mutex_unlock(&container->lock);

        return ret;
}

static void tce_iommu_detach_group(void *iommu_data,
                struct iommu_group *iommu_group)
{
        struct tce_container *container = iommu_data;
        struct iommu_table_group *table_group;
        bool found = false;
        struct tce_iommu_group *tcegrp;

        mutex_lock(&container->lock);

        list_for_each_entry(tcegrp, &container->group_list, next) {
                if (tcegrp->grp == iommu_group) {
                        found = true;
                        break;
                }
        }

        if (!found) {
                pr_warn("tce_vfio: detaching unattached group #%u\n",
                                iommu_group_id(iommu_group));
                goto unlock_exit;
        }

        list_del(&tcegrp->next);
        kfree(tcegrp);

        table_group = iommu_group_get_iommudata(iommu_group);
        BUG_ON(!table_group);

        if (!table_group->ops || !table_group->ops->release_ownership)
                tce_iommu_release_ownership(container, table_group);
        else
                tce_iommu_release_ownership_ddw(container, table_group);

unlock_exit:
        mutex_unlock(&container->lock);
}

const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
        .name           = "iommu-vfio-powerpc",
        .owner          = THIS_MODULE,
        .open           = tce_iommu_open,
        .release        = tce_iommu_release,
        .ioctl          = tce_iommu_ioctl,
        .attach_group   = tce_iommu_attach_group,
        .detach_group   = tce_iommu_detach_group,
};

static int __init tce_iommu_init(void)
{
        return vfio_register_iommu_driver(&tce_iommu_driver_ops);
}

static void __exit tce_iommu_cleanup(void)
{
        vfio_unregister_iommu_driver(&tce_iommu_driver_ops);
}

module_init(tce_iommu_init);
module_exit(tce_iommu_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);