GNU Linux-libre 5.15.72-gnu
arch/powerpc/kvm/book3s_64_vio.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/iommu.h>
#include <linux/file.h>
#include <linux/mm.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

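/* Number of host pages needed to store @iommu_pages 64-bit TCE entries */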
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
        return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}

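/*
 * Total pages to account against the locked_vm limit: the TCE pages
 * themselves plus the kvmppc_spapr_tce_table descriptor and its array
 * of struct page pointers.
 */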
static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
        unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
                        (tce_pages * sizeof(struct page *));

        return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}

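/* RCU callback: drop the hardware table reference and free the binding */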
static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
{
        struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
                        struct kvmppc_spapr_tce_iommu_table, rcu);

        iommu_tce_table_put(stit->tbl);

        kfree(stit);
}

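/* kref release: unlink from the LIOBN list and free after a grace period */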
static void kvm_spapr_tce_liobn_put(struct kref *kref)
{
        struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref,
                        struct kvmppc_spapr_tce_iommu_table, kref);

        list_del_rcu(&stit->next);

        call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
}

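/*
 * Called when an IOMMU group is detached from the VM: drop the KVM
 * reference to every hardware table of that group which is attached
 * to any of this VM's TCE tables.
 */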
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
                struct iommu_group *grp)
{
        int i;
        struct kvmppc_spapr_tce_table *stt;
        struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
        struct iommu_table_group *table_group = NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {

                table_group = iommu_group_get_iommudata(grp);
                if (WARN_ON(!table_group))
                        continue;

                list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
                        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                                if (table_group->tables[i] != stit->tbl)
                                        continue;

                                kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
                        }
                }
                cond_resched_rcu();
        }
        rcu_read_unlock();
}

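/*
 * Attaches a hardware IOMMU table of @grp to the guest TCE table backing
 * @tablefd.  Picks the first table of the group with a compatible window
 * (page size, offset, size) and takes a reference to it, or bumps the
 * existing reference if the table is already attached.
 */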
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
                struct iommu_group *grp)
{
        struct kvmppc_spapr_tce_table *stt = NULL;
        bool found = false;
        struct iommu_table *tbl = NULL;
        struct iommu_table_group *table_group;
        long i;
        struct kvmppc_spapr_tce_iommu_table *stit;
        struct fd f;

        f = fdget(tablefd);
        if (!f.file)
                return -EBADF;

        rcu_read_lock();
        list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
                if (stt == f.file->private_data) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock();

        fdput(f);

        if (!found)
                return -EINVAL;

        table_group = iommu_group_get_iommudata(grp);
        if (WARN_ON(!table_group))
                return -EFAULT;

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                struct iommu_table *tbltmp = table_group->tables[i];

                if (!tbltmp)
                        continue;
                /* Make sure hardware table parameters are compatible */
                if ((tbltmp->it_page_shift <= stt->page_shift) &&
                                (tbltmp->it_offset << tbltmp->it_page_shift ==
                                 stt->offset << stt->page_shift) &&
                                (tbltmp->it_size << tbltmp->it_page_shift >=
                                 stt->size << stt->page_shift)) {
                        /*
                         * Reference the table to avoid races with
                         * add/remove DMA windows.
                         */
                        tbl = iommu_tce_table_get(tbltmp);
                        break;
                }
        }
        if (!tbl)
                return -EINVAL;

        rcu_read_lock();
        list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
                if (tbl != stit->tbl)
                        continue;

                if (!kref_get_unless_zero(&stit->kref)) {
                        /* stit is being destroyed */
                        iommu_tce_table_put(tbl);
                        rcu_read_unlock();
                        return -ENOTTY;
                }
                /*
                 * The table is already known to this KVM, we just increased
                 * its KVM reference counter and can return.
                 */
                rcu_read_unlock();
                return 0;
        }
        rcu_read_unlock();

        stit = kzalloc(sizeof(*stit), GFP_KERNEL);
        if (!stit) {
                iommu_tce_table_put(tbl);
                return -ENOMEM;
        }

        stit->tbl = tbl;
        kref_init(&stit->kref);

        list_add_rcu(&stit->next, &stt->iommu_tables);

        return 0;
}

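/* RCU callback: free the backing pages and the table descriptor itself */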
static void release_spapr_tce_table(struct rcu_head *head)
{
        struct kvmppc_spapr_tce_table *stt = container_of(head,
                        struct kvmppc_spapr_tce_table, rcu);
        unsigned long i, npages = kvmppc_tce_pages(stt->size);

        for (i = 0; i < npages; i++)
                if (stt->pages[i])
                        __free_page(stt->pages[i]);

        kfree(stt);
}

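/*
 * Backing pages are allocated lazily, on first access; alloc_lock
 * serializes concurrent allocations of the same page.
 */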
static struct page *kvm_spapr_get_tce_page(struct kvmppc_spapr_tce_table *stt,
                unsigned long sttpage)
{
        struct page *page = stt->pages[sttpage];

        if (page)
                return page;

        mutex_lock(&stt->alloc_lock);
        page = stt->pages[sttpage];
        if (!page) {
                page = alloc_page(GFP_KERNEL | __GFP_ZERO);
                WARN_ON_ONCE(!page);
                if (page)
                        stt->pages[sttpage] = page;
        }
        mutex_unlock(&stt->alloc_lock);

        return page;
}

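/* Fault handler for the mmap'ed TCE table fd: hand back the backing page */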
static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
{
        struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
        struct page *page;

        if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
                return VM_FAULT_SIGBUS;

        page = kvm_spapr_get_tce_page(stt, vmf->pgoff);
        if (!page)
                return VM_FAULT_OOM;

        get_page(page);
        vmf->page = page;
        return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
        .fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_spapr_tce_vm_ops;
        return 0;
}

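/*
 * Releases the TCE table fd: unlink the table from the VM, drop all
 * hardware table references, return the locked_vm accounting and free
 * the table after an RCU grace period.
 */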
static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
        struct kvmppc_spapr_tce_table *stt = filp->private_data;
        struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
        struct kvm *kvm = stt->kvm;

        mutex_lock(&kvm->lock);
        list_del_rcu(&stt->list);
        mutex_unlock(&kvm->lock);

        list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
                WARN_ON(!kref_read(&stit->kref));
                while (1) {
                        if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put))
                                break;
                }
        }

        account_locked_vm(kvm->mm,
                kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);

        kvm_put_kvm(stt->kvm);

        call_rcu(&stt->rcu, release_spapr_tce_table);

        return 0;
}

static const struct file_operations kvm_spapr_tce_fops = {
        .mmap           = kvm_spapr_tce_mmap,
        .release        = kvm_spapr_tce_release,
};

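/*
 * Creates a guest TCE table: validates the window parameters, accounts
 * the locked memory, allocates the descriptor and returns an anonymous
 * inode fd which userspace can mmap to read the guest view of the table.
 */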
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                                   struct kvm_create_spapr_tce_64 *args)
{
        struct kvmppc_spapr_tce_table *stt = NULL;
        struct kvmppc_spapr_tce_table *siter;
        struct mm_struct *mm = kvm->mm;
        unsigned long npages, size = args->size;
        int ret;

        if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
                (args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
                return -EINVAL;

        npages = kvmppc_tce_pages(size);
        ret = account_locked_vm(mm, kvmppc_stt_pages(npages), true);
        if (ret)
                return ret;

        ret = -ENOMEM;
        stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
                      GFP_KERNEL);
        if (!stt)
                goto fail_acct;

        stt->liobn = args->liobn;
        stt->page_shift = args->page_shift;
        stt->offset = args->offset;
        stt->size = size;
        stt->kvm = kvm;
        mutex_init(&stt->alloc_lock);
        INIT_LIST_HEAD_RCU(&stt->iommu_tables);

        mutex_lock(&kvm->lock);

        /* Check this LIOBN hasn't been previously allocated */
        ret = 0;
        list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
                if (siter->liobn == args->liobn) {
                        ret = -EBUSY;
                        break;
                }
        }

        kvm_get_kvm(kvm);
        if (!ret)
                ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
                                       stt, O_RDWR | O_CLOEXEC);

        if (ret >= 0)
                list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
        else
                kvm_put_kvm_no_destroy(kvm);

        mutex_unlock(&kvm->lock);

        if (ret >= 0)
                return ret;

        kfree(stt);
 fail_acct:
        account_locked_vm(mm, kvmppc_stt_pages(npages), false);
        return ret;
}

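/*
 * Converts the guest physical address encoded in a TCE into the
 * corresponding userspace virtual address, preserving the offset
 * within the page.
 */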
static long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
                unsigned long *ua)
{
        unsigned long gfn = tce >> PAGE_SHIFT;
        struct kvm_memory_slot *memslot;

        memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
        if (!memslot)
                return -EINVAL;

        *ua = __gfn_to_hva_memslot(memslot, gfn) |
                (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

        return 0;
}

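/*
 * Validates a guest TCE before it is written anywhere: checks the GPA
 * alignment, that it maps to a registered memory slot, and that every
 * attached hardware table has preregistered memory covering it.
 */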
static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
                unsigned long tce)
{
        unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
        enum dma_data_direction dir = iommu_tce_direction(tce);
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long ua = 0;

        /* Allow userspace to poison TCE table */
        if (dir == DMA_NONE)
                return H_SUCCESS;

        if (iommu_tce_check_gpa(stt->page_shift, gpa))
                return H_TOO_HARD;

        if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))
                return H_TOO_HARD;

        rcu_read_lock();
        list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
                unsigned long hpa = 0;
                struct mm_iommu_table_group_mem_t *mem;
                long shift = stit->tbl->it_page_shift;

                mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
                if (!mem || mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) {
                        rcu_read_unlock();
                        return H_TOO_HARD;
                }
        }
        rcu_read_unlock();

        return H_SUCCESS;
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values into the table and expects user space to convert them.
 * Cannot fail so kvmppc_tce_validate must be called before it.
 */
static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
                unsigned long idx, unsigned long tce)
{
        struct page *page;
        u64 *tbl;
        unsigned long sttpage;

        idx -= stt->offset;
        sttpage = idx / TCES_PER_PAGE;
        page = stt->pages[sttpage];

        if (!page) {
                /* We allow any TCE, not just with read|write permissions */
                if (!tce)
                        return;

                page = kvm_spapr_get_tce_page(stt, sttpage);
                if (!page)
                        return;
        }
        tbl = page_to_virt(page);

        tbl[idx % TCES_PER_PAGE] = tce;
}

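/*
 * Clears all hardware TCEs covered by one guest TCE entry: every IOMMU
 * subpage of the (possibly larger) guest page is set back to DMA_NONE.
 */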
static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt,
                struct iommu_table *tbl, unsigned long entry)
{
        unsigned long i;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);

        for (i = 0; i < subpages; ++i) {
                unsigned long hpa = 0;
                enum dma_data_direction dir = DMA_NONE;

                iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir);
        }
}

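/*
 * Drops the "mapped" reference on the preregistered memory region that
 * backed a hardware TCE entry and clears the stored userspace address.
 */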
static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        struct mm_iommu_table_group_mem_t *mem = NULL;
        const unsigned long pgsize = 1ULL << tbl->it_page_shift;
        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

        if (!pua)
                return H_SUCCESS;

        mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
        if (!mem)
                return H_TOO_HARD;

        mm_iommu_mapped_dec(mem);

        *pua = cpu_to_be64(0);

        return H_SUCCESS;
}

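/* Unmaps a single hardware TCE and releases its preregistered memory pin */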
static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        enum dma_data_direction dir = DMA_NONE;
        unsigned long hpa = 0;
        long ret;

        if (WARN_ON_ONCE(iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa,
                                        &dir)))
                return H_TOO_HARD;

        if (dir == DMA_NONE)
                return H_SUCCESS;

        ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
        if (ret != H_SUCCESS)
                iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);

        return ret;
}

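/*
 * Unmaps all hardware TCEs backing one guest TCE entry and flushes the
 * IOMMU TLB for the affected range.
 */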
static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
                struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
                unsigned long entry)
{
        unsigned long i, ret = H_SUCCESS;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry * subpages;

        for (i = 0; i < subpages; ++i) {
                ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
                if (ret != H_SUCCESS)
                        break;
        }

        iommu_tce_kill(tbl, io_entry, subpages);

        return ret;
}

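/*
 * Maps a single hardware TCE: translates the userspace address via the
 * preregistered memory list, pins it, installs the host physical address
 * and remembers the userspace address for later unmapping.
 */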
static long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        long ret;
        unsigned long hpa;
        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
        struct mm_iommu_table_group_mem_t *mem;

        if (!pua)
                /* it_userspace allocation might be delayed */
                return H_TOO_HARD;

        mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
        if (!mem)
                /* This only handles v2 IOMMU type, v1 is handled via ioctl() */
                return H_TOO_HARD;

        if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
                return H_TOO_HARD;

        if (mm_iommu_mapped_inc(mem))
                return H_TOO_HARD;

        ret = iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);
        if (WARN_ON_ONCE(ret)) {
                mm_iommu_mapped_dec(mem);
                return H_TOO_HARD;
        }

        if (dir != DMA_NONE)
                kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);

        *pua = cpu_to_be64(ua);

        return 0;
}

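/*
 * Maps all hardware TCEs backing one guest TCE entry and flushes the
 * IOMMU TLB for the affected range.
 */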
static long kvmppc_tce_iommu_map(struct kvm *kvm,
                struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        unsigned long i, pgoff, ret = H_SUCCESS;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry * subpages;

        for (i = 0, pgoff = 0; i < subpages;
                        ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

                ret = kvmppc_tce_iommu_do_map(kvm, tbl,
                                io_entry + i, ua + pgoff, dir);
                if (ret != H_SUCCESS)
                        break;
        }

        iommu_tce_kill(tbl, io_entry, subpages);

        return ret;
}

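/*
 * H_PUT_TCE hypercall handler: validates the request, updates every
 * attached hardware table and then the guest view of the TCE table.
 */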
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                      unsigned long ioba, unsigned long tce)
{
        struct kvmppc_spapr_tce_table *stt;
        long ret, idx;
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long entry, ua = 0;
        enum dma_data_direction dir;

        /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
        /*          liobn, ioba, tce); */

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, 1);
        if (ret != H_SUCCESS)
                return ret;

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        ret = kvmppc_tce_validate(stt, tce);
        if (ret != H_SUCCESS)
                goto unlock_exit;

        dir = iommu_tce_direction(tce);

        if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
                ret = H_PARAMETER;
                goto unlock_exit;
        }

        entry = ioba >> stt->page_shift;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                if (dir == DMA_NONE)
                        ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
                                        stit->tbl, entry);
                else
                        ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
                                        entry, ua, dir);

                if (ret != H_SUCCESS) {
                        kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);
                        goto unlock_exit;
                }
        }

        kvmppc_tce_put(stt, entry, tce);

unlock_exit:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);

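/*
 * H_PUT_TCE_INDIRECT hypercall handler: reads a list of up to 512 TCEs
 * from guest memory, validates them and applies each one like H_PUT_TCE.
 */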
long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_list, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret = H_SUCCESS, idx;
        unsigned long entry, ua = 0;
        u64 __user *tces;
        u64 tce;
        struct kvmppc_spapr_tce_iommu_table *stit;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        entry = ioba >> stt->page_shift;
        /*
         * SPAPR spec says that the maximum size of the list is 512 TCEs
         * so the whole table fits in a 4K page
         */
        if (npages > 512)
                return H_PARAMETER;

        if (tce_list & (SZ_4K - 1))
                return H_PARAMETER;

        ret = kvmppc_ioba_validate(stt, ioba, npages);
        if (ret != H_SUCCESS)
                return ret;

        idx = srcu_read_lock(&vcpu->kvm->srcu);
        if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua)) {
                ret = H_TOO_HARD;
                goto unlock_exit;
        }
        tces = (u64 __user *) ua;

        for (i = 0; i < npages; ++i) {
                if (get_user(tce, tces + i)) {
                        ret = H_TOO_HARD;
                        goto unlock_exit;
                }
                tce = be64_to_cpu(tce);

                ret = kvmppc_tce_validate(stt, tce);
                if (ret != H_SUCCESS)
                        goto unlock_exit;
        }

        for (i = 0; i < npages; ++i) {
                /*
                 * This looks unsafe, because we validate, then regrab
                 * the TCE from userspace which could have been changed by
                 * another thread.
                 *
                 * But it actually is safe, because the relevant checks will be
                 * re-executed in the following code.  If userspace tries to
                 * change this dodgily it will result in a messier failure mode
                 * but won't threaten the host.
                 */
                if (get_user(tce, tces + i)) {
                        ret = H_TOO_HARD;
                        goto unlock_exit;
                }
                tce = be64_to_cpu(tce);

                if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
                        ret = H_PARAMETER;
                        goto unlock_exit;
                }

                list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                        ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
                                        stit->tbl, entry + i, ua,
                                        iommu_tce_direction(tce));

                        if (ret != H_SUCCESS) {
                                kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl,
                                                 entry + i);
                                goto unlock_exit;
                        }
                }

                kvmppc_tce_put(stt, entry + i, tce);
        }

unlock_exit:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);

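/*
 * H_STUFF_TCE hypercall handler: writes the same TCE value to @npages
 * consecutive entries, clearing the corresponding hardware mappings.
 */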
long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_value, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret;
        struct kvmppc_spapr_tce_iommu_table *stit;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, npages);
        if (ret != H_SUCCESS)
                return ret;

        /* Check permission bits only to allow userspace to poison TCE for debug */
        if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
                return H_PARAMETER;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                unsigned long entry = ioba >> stt->page_shift;

                for (i = 0; i < npages; ++i) {
                        ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
                                        stit->tbl, entry + i);

                        if (ret == H_SUCCESS)
                                continue;

                        if (ret == H_TOO_HARD)
                                return ret;

                        WARN_ON_ONCE(1);
                        kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i);
                }
        }

        for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
                kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

        return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);