GNU Linux-libre 5.4.274-gnu1
arch/powerpc/kvm/book3s_64_vio.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/iommu.h>
#include <linux/file.h>
#include <linux/mm.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

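/*
 * Number of guest pages required to back a TCE table with
 * @iommu_pages entries; each TCE is a u64.
 */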
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
        return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}

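/*
 * Total number of pages to charge against the locked_vm limit for one
 * table: the TCE backing pages themselves plus the descriptor (struct
 * kvmppc_spapr_tce_table and its array of page pointers).
 */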
static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
        unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
                        (tce_pages * sizeof(struct page *));

        return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}

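/* RCU callback: drop the hardware table reference and free the link. */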
static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
{
        struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
                        struct kvmppc_spapr_tce_iommu_table, rcu);

        iommu_tce_table_put(stit->tbl);

        kfree(stit);
}

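/*
 * kref release: unlink from the TCE table's list and defer the actual
 * freeing until after a grace period, as lockless walkers may still
 * see this entry.
 */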
static void kvm_spapr_tce_liobn_put(struct kref *kref)
{
        struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref,
                        struct kvmppc_spapr_tce_iommu_table, kref);

        list_del_rcu(&stit->next);

        call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
}

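/*
 * Called when an IOMMU group is detached from the VM: drops this KVM's
 * reference to every hardware table of the group that is attached to
 * any of the VM's TCE tables.
 */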
void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
                struct iommu_group *grp)
{
        int i;
        struct kvmppc_spapr_tce_table *stt;
        struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
        struct iommu_table_group *table_group = NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
                table_group = iommu_group_get_iommudata(grp);
                if (WARN_ON(!table_group))
                        continue;

                list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
                        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                                if (table_group->tables[i] != stit->tbl)
                                        continue;

                                kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
                        }
                }
                cond_resched_rcu();
        }
        rcu_read_unlock();
}

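/*
 * Attaches a hardware IOMMU table to a KVM-managed TCE table.
 * @tablefd must refer to a TCE table fd of this VM; a hardware table
 * of @grp is accepted if its page size, offset and size are compatible
 * with the guest view of the window. Attaching the same table twice
 * only bumps the reference counter.
 */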
long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
                struct iommu_group *grp)
{
        struct kvmppc_spapr_tce_table *stt = NULL;
        bool found = false;
        struct iommu_table *tbl = NULL;
        struct iommu_table_group *table_group;
        long i;
        struct kvmppc_spapr_tce_iommu_table *stit;
        struct fd f;

        f = fdget(tablefd);
        if (!f.file)
                return -EBADF;

        rcu_read_lock();
        list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
                if (stt == f.file->private_data) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock();

        fdput(f);

        if (!found)
                return -EINVAL;

        table_group = iommu_group_get_iommudata(grp);
        if (WARN_ON(!table_group))
                return -EFAULT;

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                struct iommu_table *tbltmp = table_group->tables[i];

                if (!tbltmp)
                        continue;
                /* Make sure hardware table parameters are compatible */
                if ((tbltmp->it_page_shift <= stt->page_shift) &&
                                (tbltmp->it_offset << tbltmp->it_page_shift ==
                                 stt->offset << stt->page_shift) &&
                                (tbltmp->it_size << tbltmp->it_page_shift >=
                                 stt->size << stt->page_shift)) {
                        /*
                         * Reference the table to avoid races with
                         * add/remove DMA windows.
                         */
                        tbl = iommu_tce_table_get(tbltmp);
                        break;
                }
        }
        if (!tbl)
                return -EINVAL;

        rcu_read_lock();
        list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
                if (tbl != stit->tbl)
                        continue;

                if (!kref_get_unless_zero(&stit->kref)) {
                        /* stit is being destroyed */
                        iommu_tce_table_put(tbl);
                        rcu_read_unlock();
                        return -ENOTTY;
                }
                /*
                 * The table is already known to this KVM; we just increased
                 * its KVM reference counter and can return.
                 */
                rcu_read_unlock();
                return 0;
        }
        rcu_read_unlock();

        stit = kzalloc(sizeof(*stit), GFP_KERNEL);
        if (!stit) {
                iommu_tce_table_put(tbl);
                return -ENOMEM;
        }

        stit->tbl = tbl;
        kref_init(&stit->kref);

        list_add_rcu(&stit->next, &stt->iommu_tables);

        return 0;
}

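/* RCU callback freeing the TCE backing pages and the descriptor. */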
static void release_spapr_tce_table(struct rcu_head *head)
{
        struct kvmppc_spapr_tce_table *stt = container_of(head,
                        struct kvmppc_spapr_tce_table, rcu);
        unsigned long i, npages = kvmppc_tce_pages(stt->size);

        for (i = 0; i < npages; i++)
                if (stt->pages[i])
                        __free_page(stt->pages[i]);

        kfree(stt);
}

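/*
 * Returns (allocating on first use) the page backing the given
 * page-sized slice of the TCE table; alloc_lock serializes the
 * allocation against concurrent faults and hcall handlers.
 */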
static struct page *kvm_spapr_get_tce_page(struct kvmppc_spapr_tce_table *stt,
                unsigned long sttpage)
{
        struct page *page = stt->pages[sttpage];

        if (page)
                return page;

        mutex_lock(&stt->alloc_lock);
        page = stt->pages[sttpage];
        if (!page) {
                page = alloc_page(GFP_KERNEL | __GFP_ZERO);
                WARN_ON_ONCE(!page);
                if (page)
                        stt->pages[sttpage] = page;
        }
        mutex_unlock(&stt->alloc_lock);

        return page;
}

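/* Backs mmap()ed TCE table pages, allocating them on demand. */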
static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
{
        struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
        struct page *page;

        if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
                return VM_FAULT_SIGBUS;

        page = kvm_spapr_get_tce_page(stt, vmf->pgoff);
        if (!page)
                return VM_FAULT_OOM;

        get_page(page);
        vmf->page = page;
        return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
        .fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_spapr_tce_vm_ops;
        return 0;
}

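/*
 * fd release: unlinks the table from the VM, drops all references this
 * table holds on hardware IOMMU tables, gives back the locked_vm
 * accounting and frees the table via RCU.
 */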
static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
        struct kvmppc_spapr_tce_table *stt = filp->private_data;
        struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
        struct kvm *kvm = stt->kvm;

        mutex_lock(&kvm->lock);
        list_del_rcu(&stt->list);
        mutex_unlock(&kvm->lock);

        list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
                WARN_ON(!kref_read(&stit->kref));
                while (1) {
                        if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put))
                                break;
                }
        }

        kvm_put_kvm(stt->kvm);

        account_locked_vm(current->mm,
                kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);
        call_rcu(&stt->rcu, release_spapr_tce_table);

        return 0;
}

static const struct file_operations kvm_spapr_tce_fops = {
        .mmap           = kvm_spapr_tce_mmap,
        .release        = kvm_spapr_tce_release,
};

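/*
 * Handler for the KVM_CREATE_SPAPR_TCE_64 vm ioctl: validates the
 * window geometry, charges locked_vm and exposes the new table to
 * userspace as an anon fd which can be mmap()ed to read the TCEs.
 *
 * Illustrative userspace sketch (not part of this file; the LIOBN and
 * window geometry below are made-up example values):
 *
 *	struct kvm_create_spapr_tce_64 args = {
 *		.liobn = 0x80000000,	// example LIOBN
 *		.page_shift = 12,	// 4K IOMMU pages
 *		.offset = 0,		// window start, in pages
 *		.size = 0x10000,	// window size, in pages
 *	};
 *	int tablefd = ioctl(vmfd, KVM_CREATE_SPAPR_TCE_64, &args);
 */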
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                                   struct kvm_create_spapr_tce_64 *args)
{
        struct kvmppc_spapr_tce_table *stt = NULL;
        struct kvmppc_spapr_tce_table *siter;
        unsigned long npages, size = args->size;
        int ret = -ENOMEM;

        if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
                (args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
                return -EINVAL;

        npages = kvmppc_tce_pages(size);
        ret = account_locked_vm(current->mm, kvmppc_stt_pages(npages), true);
        if (ret)
                return ret;

        ret = -ENOMEM;
        stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
                      GFP_KERNEL);
        if (!stt)
                goto fail_acct;

        stt->liobn = args->liobn;
        stt->page_shift = args->page_shift;
        stt->offset = args->offset;
        stt->size = size;
        stt->kvm = kvm;
        mutex_init(&stt->alloc_lock);
        INIT_LIST_HEAD_RCU(&stt->iommu_tables);

        mutex_lock(&kvm->lock);

        /* Check this LIOBN hasn't been previously allocated */
        ret = 0;
        list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
                if (siter->liobn == args->liobn) {
                        ret = -EBUSY;
                        break;
                }
        }

        kvm_get_kvm(kvm);
        if (!ret)
                ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
                                       stt, O_RDWR | O_CLOEXEC);

        if (ret >= 0)
                list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
        else
                kvm_put_kvm(kvm);

        mutex_unlock(&kvm->lock);

        if (ret >= 0)
                return ret;

        kfree(stt);
 fail_acct:
        account_locked_vm(current->mm, kvmppc_stt_pages(npages), false);
        return ret;
}

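/*
 * Converts a TCE (guest physical address plus permission bits) to a
 * userspace address: the permission bits are stripped and the offset
 * within the page is preserved in *ua.
 */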
static long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
                unsigned long *ua)
{
        unsigned long gfn = tce >> PAGE_SHIFT;
        struct kvm_memory_slot *memslot;

        memslot = search_memslots(kvm_memslots(kvm), gfn);
        if (!memslot)
                return -EINVAL;

        *ua = __gfn_to_hva_memslot(memslot, gfn) |
                (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

        return 0;
}

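/*
 * Validates a TCE before it is put into the table: the GPA must be
 * aligned to the table's page size and, for every attached hardware
 * table, already covered by a pre-registered (mm_iommu) memory region.
 * Clearing entries (DMA_NONE) is always allowed.
 */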
static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
                unsigned long tce)
{
        unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
        enum dma_data_direction dir = iommu_tce_direction(tce);
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long ua = 0;

        /* Allow userspace to poison TCE table */
        if (dir == DMA_NONE)
                return H_SUCCESS;

        if (iommu_tce_check_gpa(stt->page_shift, gpa))
                return H_TOO_HARD;

        if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))
                return H_TOO_HARD;

        rcu_read_lock();
        list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
                unsigned long hpa = 0;
                struct mm_iommu_table_group_mem_t *mem;
                long shift = stit->tbl->it_page_shift;

                mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
                if (!mem || mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) {
                        rcu_read_unlock();
                        return H_TOO_HARD;
                }
        }
        rcu_read_unlock();

        return H_SUCCESS;
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values into the table and expects user space to convert them.
 * Cannot fail, so kvmppc_tce_validate() must be called before it.
 */
static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
                unsigned long idx, unsigned long tce)
{
        struct page *page;
        u64 *tbl;
        unsigned long sttpage;

        idx -= stt->offset;
        sttpage = idx / TCES_PER_PAGE;
        page = stt->pages[sttpage];

        if (!page) {
                /* We allow any TCE, not just those with read|write permission bits */
                if (!tce)
                        return;

                page = kvm_spapr_get_tce_page(stt, sttpage);
                if (!page)
                        return;
        }
        tbl = page_to_virt(page);

        tbl[idx % TCES_PER_PAGE] = tce;
}

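/*
 * Resets all hardware TCEs covered by one guest TCE table entry to the
 * cleared (DMA_NONE) state; used to undo a partial update after a
 * failure.
 */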
static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt,
                struct iommu_table *tbl, unsigned long entry)
{
        unsigned long i;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);

        for (i = 0; i < subpages; ++i) {
                unsigned long hpa = 0;
                enum dma_data_direction dir = DMA_NONE;

                iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir);
        }
}

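/*
 * Drops the pre-registered memory "mapped" counter for the entry's
 * current userspace address and clears the stored UA.
 */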
static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        struct mm_iommu_table_group_mem_t *mem = NULL;
        const unsigned long pgsize = 1ULL << tbl->it_page_shift;
        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

        if (!pua)
                return H_SUCCESS;

        mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
        if (!mem)
                return H_TOO_HARD;

        mm_iommu_mapped_dec(mem);

        *pua = cpu_to_be64(0);

        return H_SUCCESS;
}

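/*
 * Clears one hardware TCE. If the entry was mapped, also drops its
 * "mapped" counter; on failure the old HPA and direction are put back.
 */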
static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        enum dma_data_direction dir = DMA_NONE;
        unsigned long hpa = 0;
        long ret;

        if (WARN_ON_ONCE(iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa,
                                        &dir)))
                return H_TOO_HARD;

        if (dir == DMA_NONE)
                return H_SUCCESS;

        ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
        if (ret != H_SUCCESS)
                iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);

        return ret;
}

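/*
 * Unmaps every hardware subpage backing one guest TCE entry and
 * flushes the change with iommu_tce_kill().
 */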
static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
                struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
                unsigned long entry)
{
        unsigned long i, ret = H_SUCCESS;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry * subpages;

        for (i = 0; i < subpages; ++i) {
                ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
                if (ret != H_SUCCESS)
                        break;
        }

        iommu_tce_kill(tbl, io_entry, subpages);

        return ret;
}

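/*
 * Maps one hardware TCE: translates @ua through the pre-registered
 * memory list, bumps its "mapped" counter and installs the HPA. If the
 * entry was previously mapped, the old mapping's counter is dropped.
 * The UA is remembered for a later unmap.
 */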
long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        long ret;
        unsigned long hpa;
        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
        struct mm_iommu_table_group_mem_t *mem;

        if (!pua)
                /* it_userspace allocation might be delayed */
                return H_TOO_HARD;

        mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
        if (!mem)
                /* This only handles v2 IOMMU type; v1 is handled via ioctl() */
                return H_TOO_HARD;

        if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
                return H_TOO_HARD;

        if (mm_iommu_mapped_inc(mem))
                return H_TOO_HARD;

        ret = iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);
        if (WARN_ON_ONCE(ret)) {
                mm_iommu_mapped_dec(mem);
                return H_TOO_HARD;
        }

        if (dir != DMA_NONE)
                kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);

        *pua = cpu_to_be64(ua);

        return 0;
}

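/*
 * Maps all hardware subpages backing one guest TCE entry and flushes
 * the change with iommu_tce_kill().
 */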
static long kvmppc_tce_iommu_map(struct kvm *kvm,
                struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        unsigned long i, pgoff, ret = H_SUCCESS;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry * subpages;

        for (i = 0, pgoff = 0; i < subpages;
                        ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

                ret = kvmppc_tce_iommu_do_map(kvm, tbl,
                                io_entry + i, ua + pgoff, dir);
                if (ret != H_SUCCESS)
                        break;
        }

        iommu_tce_kill(tbl, io_entry, subpages);

        return ret;
}

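/*
 * Virtual mode handler for the H_PUT_TCE hypercall: sets (or clears,
 * for DMA_NONE) a single TCE, updating both the KVM copy of the table
 * and every attached hardware table.
 */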
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                      unsigned long ioba, unsigned long tce)
{
        struct kvmppc_spapr_tce_table *stt;
        long ret, idx;
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long entry, ua = 0;
        enum dma_data_direction dir;

        /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
        /*          liobn, ioba, tce); */

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, 1);
        if (ret != H_SUCCESS)
                return ret;

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        ret = kvmppc_tce_validate(stt, tce);
        if (ret != H_SUCCESS)
                goto unlock_exit;

        dir = iommu_tce_direction(tce);

        if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
                ret = H_PARAMETER;
                goto unlock_exit;
        }

        entry = ioba >> stt->page_shift;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                if (dir == DMA_NONE)
                        ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
                                        stit->tbl, entry);
                else
                        ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
                                        entry, ua, dir);

                if (ret != H_SUCCESS) {
                        kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);
                        goto unlock_exit;
                }
        }

        kvmppc_tce_put(stt, entry, tce);

unlock_exit:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);

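/*
 * Virtual mode handler for H_PUT_TCE_INDIRECT: reads up to 512 TCEs
 * from a guest page at @tce_list, validates them all first, then
 * applies them one by one.
 */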
long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_list, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret = H_SUCCESS, idx;
        unsigned long entry, ua = 0;
        u64 __user *tces;
        u64 tce;
        struct kvmppc_spapr_tce_iommu_table *stit;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        entry = ioba >> stt->page_shift;
        /*
         * The SPAPR spec says that the maximum size of the list is 512 TCEs,
         * so the whole list fits in a 4K page.
         */
        if (npages > 512)
                return H_PARAMETER;

        if (tce_list & (SZ_4K - 1))
                return H_PARAMETER;

        ret = kvmppc_ioba_validate(stt, ioba, npages);
        if (ret != H_SUCCESS)
                return ret;

        idx = srcu_read_lock(&vcpu->kvm->srcu);
        if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua)) {
                ret = H_TOO_HARD;
                goto unlock_exit;
        }
        tces = (u64 __user *) ua;

        for (i = 0; i < npages; ++i) {
                if (get_user(tce, tces + i)) {
                        ret = H_TOO_HARD;
                        goto unlock_exit;
                }
                tce = be64_to_cpu(tce);

                ret = kvmppc_tce_validate(stt, tce);
                if (ret != H_SUCCESS)
                        goto unlock_exit;
        }

        for (i = 0; i < npages; ++i) {
                /*
                 * This looks unsafe, because we validate, then regrab
                 * the TCE from userspace which could have been changed by
                 * another thread.
                 *
                 * But it actually is safe, because the relevant checks will be
                 * re-executed in the following code.  If userspace tries to
                 * change this dodgily it will result in a messier failure mode
                 * but won't threaten the host.
                 */
                if (get_user(tce, tces + i)) {
                        ret = H_TOO_HARD;
                        goto unlock_exit;
                }
                tce = be64_to_cpu(tce);

                if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
                        ret = H_PARAMETER;
                        goto unlock_exit;
                }

                list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                        ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
                                        stit->tbl, entry + i, ua,
                                        iommu_tce_direction(tce));

                        if (ret != H_SUCCESS) {
                                kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl,
                                                 entry + i);
                                goto unlock_exit;
                        }
                }

                kvmppc_tce_put(stt, entry + i, tce);
        }

unlock_exit:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);

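/*
 * Virtual mode handler for H_STUFF_TCE: sets @npages consecutive
 * entries to the same @tce_value, which must not carry permission
 * bits, i.e. this can only clear (or poison) entries.
 */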
long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_value, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret;
        struct kvmppc_spapr_tce_iommu_table *stit;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, npages);
        if (ret != H_SUCCESS)
                return ret;

        /* Check permission bits only, to allow userspace to poison TCEs for debug */
        if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
                return H_PARAMETER;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                unsigned long entry = ioba >> stt->page_shift;

                for (i = 0; i < npages; ++i) {
                        ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
                                        stit->tbl, entry + i);

                        if (ret == H_SUCCESS)
                                continue;

                        if (ret == H_TOO_HARD)
                                return ret;

                        WARN_ON_ONCE(1);
                        kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i);
                }
        }

        for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
                kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

        return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);