GNU Linux-libre 4.14.259-gnu1
arch/powerpc/kvm/book3s_64_vio.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/iommu.h>
#include <linux/file.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

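/*
 * Number of host pages needed to back the guest-visible TCE table:
 * one u64 per IOMMU page covered, rounded up to whole host pages.
 * E.g. 512 TCEs take 4KB and fit in a single host page.
 */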
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
        return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}

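/*
 * Total pages to charge against RLIMIT_MEMLOCK for one table: the TCE
 * backing pages themselves plus the pages occupied by the
 * kvmppc_spapr_tce_table descriptor and its array of page pointers.
 */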
static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
        unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
                        (tce_pages * sizeof(struct page *));

        return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}

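/*
 * Charge (inc == true) or uncharge (inc == false) @stt_pages against
 * the calling process' RLIMIT_MEMLOCK. Fails with -ENOMEM when the
 * increment would exceed the limit and the task lacks CAP_IPC_LOCK.
 */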
static long kvmppc_account_memlimit(unsigned long stt_pages, bool inc)
{
        long ret = 0;

        if (!current || !current->mm)
                return ret; /* process exited */

        down_write(&current->mm->mmap_sem);

        if (inc) {
                unsigned long locked, lock_limit;

                locked = current->mm->locked_vm + stt_pages;
                lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
                if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        ret = -ENOMEM;
                else
                        current->mm->locked_vm += stt_pages;
        } else {
                if (WARN_ON_ONCE(stt_pages > current->mm->locked_vm))
                        stt_pages = current->mm->locked_vm;

                current->mm->locked_vm -= stt_pages;
        }

        pr_debug("[%d] RLIMIT_MEMLOCK KVM %c%ld %ld/%ld%s\n", current->pid,
                        inc ? '+' : '-',
                        stt_pages << PAGE_SHIFT,
                        current->mm->locked_vm << PAGE_SHIFT,
                        rlimit(RLIMIT_MEMLOCK),
                        ret ? " - exceeded" : "");

        up_write(&current->mm->mmap_sem);

        return ret;
}

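/* RCU callback: drop the iommu_table reference and free the wrapper. */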
static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
{
        struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
                        struct kvmppc_spapr_tce_iommu_table, rcu);

        iommu_tce_table_put(stit->tbl);

        kfree(stit);
}

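/*
 * kref release: unlink the table from the LIOBN's list and defer the
 * actual freeing past a grace period, since the TCE handlers walk
 * the list locklessly under RCU.
 */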
static void kvm_spapr_tce_liobn_put(struct kref *kref)
{
        struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref,
                        struct kvmppc_spapr_tce_iommu_table, kref);

        list_del_rcu(&stit->next);

        call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
}

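/*
 * Called when an IOMMU group is detached from the VM: drop one KVM
 * reference on every hardware table of the group that is attached to
 * any registered LIOBN.
 */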
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
                struct iommu_group *grp)
{
        int i;
        struct kvmppc_spapr_tce_table *stt;
        struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
        struct iommu_table_group *table_group = NULL;

        list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {

                table_group = iommu_group_get_iommudata(grp);
                if (WARN_ON(!table_group))
                        continue;

                list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
                        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                                if (table_group->tables[i] != stit->tbl)
                                        continue;

                                kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
                        }
                }
        }
}

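/*
 * Attach a hardware IOMMU table to the LIOBN identified by @tablefd
 * (a TCE table fd returned by KVM_CREATE_SPAPR_TCE): pick the group's
 * table whose geometry (page shift, offset, size) matches the guest
 * view of the window, and take references so the TCE handlers can
 * update the hardware table directly.
 */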
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
                struct iommu_group *grp)
{
        struct kvmppc_spapr_tce_table *stt = NULL;
        bool found = false;
        struct iommu_table *tbl = NULL;
        struct iommu_table_group *table_group;
        long i;
        struct kvmppc_spapr_tce_iommu_table *stit;
        struct fd f;

        f = fdget(tablefd);
        if (!f.file)
                return -EBADF;

        list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
                if (stt == f.file->private_data) {
                        found = true;
                        break;
                }
        }

        fdput(f);

        if (!found)
                return -EINVAL;

        table_group = iommu_group_get_iommudata(grp);
        if (WARN_ON(!table_group))
                return -EFAULT;

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                struct iommu_table *tbltmp = table_group->tables[i];

                if (!tbltmp)
                        continue;
                /*
                 * Make sure hardware table parameters are exactly the same;
                 * this is used in the TCE handlers where boundary checks
                 * use only the first attached table.
                 */
                if ((tbltmp->it_page_shift == stt->page_shift) &&
                                (tbltmp->it_offset == stt->offset) &&
                                (tbltmp->it_size == stt->size)) {
                        /*
                         * Reference the table to avoid races with
                         * add/remove DMA windows.
                         */
                        tbl = iommu_tce_table_get(tbltmp);
                        break;
                }
        }
        if (!tbl)
                return -EINVAL;

        list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
                if (tbl != stit->tbl)
                        continue;

                if (!kref_get_unless_zero(&stit->kref)) {
                        /* stit is being destroyed */
                        iommu_tce_table_put(tbl);
                        return -ENOTTY;
                }
                /*
                 * The table is already known to this KVM, we just increased
                 * its KVM reference counter and can return.
                 */
                return 0;
        }

        stit = kzalloc(sizeof(*stit), GFP_KERNEL);
        if (!stit) {
                iommu_tce_table_put(tbl);
                return -ENOMEM;
        }

        stit->tbl = tbl;
        kref_init(&stit->kref);

        list_add_rcu(&stit->next, &stt->iommu_tables);

        return 0;
}

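/* RCU callback: free the TCE backing pages and the table descriptor. */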
static void release_spapr_tce_table(struct rcu_head *head)
{
        struct kvmppc_spapr_tce_table *stt = container_of(head,
                        struct kvmppc_spapr_tce_table, rcu);
        unsigned long i, npages = kvmppc_tce_pages(stt->size);

        for (i = 0; i < npages; i++)
                __free_page(stt->pages[i]);

        kfree(stt);
}

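/*
 * Fault handler for userspace mmap() of the TCE table fd: hand back
 * the backing page for the faulting offset, or SIGBUS past the end
 * of the table.
 */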
static int kvm_spapr_tce_fault(struct vm_fault *vmf)
{
        struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
        struct page *page;

        if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
                return VM_FAULT_SIGBUS;

        page = stt->pages[vmf->pgoff];
        get_page(page);
        vmf->page = page;
        return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
        .fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_spapr_tce_vm_ops;
        return 0;
}

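/*
 * Last reference to the TCE table fd is gone: unhook the table from
 * the VM, release all attached hardware tables, return the memlock
 * charge and free everything after a grace period.
 */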
static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
        struct kvmppc_spapr_tce_table *stt = filp->private_data;
        struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
        struct kvm *kvm = stt->kvm;

        mutex_lock(&kvm->lock);
        list_del_rcu(&stt->list);
        mutex_unlock(&kvm->lock);

        list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
                WARN_ON(!kref_read(&stit->kref));
                while (1) {
                        if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put))
                                break;
                }
        }

        kvm_put_kvm(stt->kvm);

        kvmppc_account_memlimit(
                kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);
        call_rcu(&stt->rcu, release_spapr_tce_table);

        return 0;
}

static const struct file_operations kvm_spapr_tce_fops = {
        .mmap           = kvm_spapr_tce_mmap,
        .release        = kvm_spapr_tce_release,
};

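/*
 * KVM_CREATE_SPAPR_TCE{,_64} ioctl handler: round the requested size
 * up to a whole page of TCEs, charge the memlock limit, allocate and
 * zero the backing pages, check the LIOBN is not already taken, and
 * hand the table to userspace as an anonymous file descriptor.
 */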
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                                   struct kvm_create_spapr_tce_64 *args)
{
        struct kvmppc_spapr_tce_table *stt = NULL;
        struct kvmppc_spapr_tce_table *siter;
        unsigned long npages, size;
        int ret = -ENOMEM;
        int i;

        if (!args->size)
                return -EINVAL;

        size = _ALIGN_UP(args->size, PAGE_SIZE >> 3);
        npages = kvmppc_tce_pages(size);
        ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
        if (ret)
                return ret;

        ret = -ENOMEM;
        stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
                      GFP_KERNEL);
        if (!stt)
                goto fail_acct;

        stt->liobn = args->liobn;
        stt->page_shift = args->page_shift;
        stt->offset = args->offset;
        stt->size = size;
        stt->kvm = kvm;
        INIT_LIST_HEAD_RCU(&stt->iommu_tables);

        for (i = 0; i < npages; i++) {
                stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
                if (!stt->pages[i])
                        goto fail;
        }

        mutex_lock(&kvm->lock);

        /* Check this LIOBN hasn't been previously allocated */
        ret = 0;
        list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
                if (siter->liobn == args->liobn) {
                        ret = -EBUSY;
                        break;
                }
        }

        if (!ret)
                ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
                                       stt, O_RDWR | O_CLOEXEC);

        if (ret >= 0) {
                list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
                kvm_get_kvm(kvm);
        }

        mutex_unlock(&kvm->lock);

        if (ret >= 0)
                return ret;

 fail:
        for (i = 0; i < npages; i++)
                if (stt->pages[i])
                        __free_page(stt->pages[i]);

        kfree(stt);
 fail_acct:
        kvmppc_account_memlimit(kvmppc_stt_pages(npages), false);
        return ret;
}

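/* Reset a hardware TCE entry by exchanging it with an empty (DMA_NONE) one. */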
static void kvmppc_clear_tce(struct iommu_table *tbl, unsigned long entry)
{
        unsigned long hpa = 0;
        enum dma_data_direction dir = DMA_NONE;

        iommu_tce_xchg(tbl, entry, &hpa, &dir);
}

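/*
 * Decrement the "mapped" count of the preregistered memory chunk
 * backing @entry and forget the userspace address stored for it.
 */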
static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        struct mm_iommu_table_group_mem_t *mem = NULL;
        const unsigned long pgsize = 1ULL << tbl->it_page_shift;
        unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

        if (!pua)
                /* it_userspace allocation might be delayed */
                return H_TOO_HARD;

        mem = mm_iommu_lookup(kvm->mm, *pua, pgsize);
        if (!mem)
                return H_TOO_HARD;

        mm_iommu_mapped_dec(mem);

        *pua = 0;

        return H_SUCCESS;
}

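/*
 * Clear one hardware TCE and, if it was mapped, release the
 * corresponding preregistered-memory reference. On failure the old
 * translation is put back.
 */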
static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        enum dma_data_direction dir = DMA_NONE;
        unsigned long hpa = 0;
        long ret;

        if (WARN_ON_ONCE(iommu_tce_xchg(tbl, entry, &hpa, &dir)))
                return H_TOO_HARD;

        if (dir == DMA_NONE)
                return H_SUCCESS;

        ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
        if (ret != H_SUCCESS)
                iommu_tce_xchg(tbl, entry, &hpa, &dir);

        return ret;
}

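/*
 * Translate the userspace address @ua via the preregistered-memory
 * list, take a "mapped" reference and install the resulting host
 * physical address into hardware TCE @entry; a previous mapping at
 * that entry is released. H_TOO_HARD tells the caller the request
 * cannot be completed here.
 */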
long kvmppc_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        long ret;
        unsigned long hpa, *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
        struct mm_iommu_table_group_mem_t *mem;

        if (!pua)
                /* it_userspace allocation might be delayed */
                return H_TOO_HARD;

        mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
        if (!mem)
                /* This only handles v2 IOMMU type, v1 is handled via ioctl() */
                return H_TOO_HARD;

        if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
                return H_TOO_HARD;

        if (mm_iommu_mapped_inc(mem))
                return H_TOO_HARD;

        ret = iommu_tce_xchg(tbl, entry, &hpa, &dir);
        if (WARN_ON_ONCE(ret)) {
                mm_iommu_mapped_dec(mem);
                return H_TOO_HARD;
        }

        if (dir != DMA_NONE)
                kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);

        *pua = ua;

        return 0;
}

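/*
 * H_PUT_TCE hypercall (virtual mode): validate the LIOBN/ioba/TCE
 * triple, update every attached hardware table, then store the TCE
 * in the guest-visible shadow table.
 */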
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                      unsigned long ioba, unsigned long tce)
{
        struct kvmppc_spapr_tce_table *stt;
        long ret, idx;
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long entry, ua = 0;
        enum dma_data_direction dir;

        /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
        /*          liobn, ioba, tce); */

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, 1);
        if (ret != H_SUCCESS)
                return ret;

        ret = kvmppc_tce_validate(stt, tce);
        if (ret != H_SUCCESS)
                return ret;

        dir = iommu_tce_direction(tce);

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
                        tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) {
                ret = H_PARAMETER;
                goto unlock_exit;
        }

        entry = ioba >> stt->page_shift;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                if (dir == DMA_NONE)
                        ret = kvmppc_tce_iommu_unmap(vcpu->kvm,
                                        stit->tbl, entry);
                else
                        ret = kvmppc_tce_iommu_map(vcpu->kvm, stit->tbl,
                                        entry, ua, dir);

                if (ret == H_SUCCESS)
                        continue;

                if (ret == H_TOO_HARD)
                        goto unlock_exit;

                WARN_ON_ONCE(1);
                kvmppc_clear_tce(stit->tbl, entry);
        }

        kvmppc_tce_put(stt, entry, tce);

unlock_exit:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);

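/*
 * H_PUT_TCE_INDIRECT hypercall (virtual mode): like H_PUT_TCE, but
 * the TCEs come as a guest-physical list of up to 512 big-endian
 * u64s, applied to @npages consecutive entries starting at @ioba.
 */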
long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_list, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret = H_SUCCESS, idx;
        unsigned long entry, ua = 0;
        u64 __user *tces;
        u64 tce;
        struct kvmppc_spapr_tce_iommu_table *stit;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        entry = ioba >> stt->page_shift;
        /*
         * The SPAPR spec says that the maximum size of the list is
         * 512 TCEs, so the whole list fits in a 4K page.
         */
        if (npages > 512)
                return H_PARAMETER;

        if (tce_list & (SZ_4K - 1))
                return H_PARAMETER;

        ret = kvmppc_ioba_validate(stt, ioba, npages);
        if (ret != H_SUCCESS)
                return ret;

        idx = srcu_read_lock(&vcpu->kvm->srcu);
        if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL)) {
                ret = H_TOO_HARD;
                goto unlock_exit;
        }
        tces = (u64 __user *) ua;

        for (i = 0; i < npages; ++i) {
                if (get_user(tce, tces + i)) {
                        ret = H_TOO_HARD;
                        goto unlock_exit;
                }
                tce = be64_to_cpu(tce);

                ret = kvmppc_tce_validate(stt, tce);
                if (ret != H_SUCCESS)
                        goto unlock_exit;

                if (kvmppc_gpa_to_ua(vcpu->kvm,
                                tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
                                &ua, NULL)) {
                        ret = H_PARAMETER;
                        goto unlock_exit;
                }

                list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                        ret = kvmppc_tce_iommu_map(vcpu->kvm,
                                        stit->tbl, entry + i, ua,
                                        iommu_tce_direction(tce));

                        if (ret == H_SUCCESS)
                                continue;

                        if (ret == H_TOO_HARD)
                                goto unlock_exit;

                        WARN_ON_ONCE(1);
                        /* Clear the entry that actually failed, not the first one */
                        kvmppc_clear_tce(stit->tbl, entry + i);
                }

                kvmppc_tce_put(stt, entry + i, tce);
        }

unlock_exit:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);

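/*
 * H_STUFF_TCE hypercall (virtual mode): set @npages consecutive
 * entries starting at @ioba to the same @tce_value, unmapping the
 * range from every attached hardware table first.
 */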
long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_value, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret;
        struct kvmppc_spapr_tce_iommu_table *stit;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, npages);
        if (ret != H_SUCCESS)
                return ret;

        /* Check permission bits only, to allow userspace to poison TCEs for debugging */
        if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
                return H_PARAMETER;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                unsigned long entry = ioba >> stit->tbl->it_page_shift;

                for (i = 0; i < npages; ++i) {
                        ret = kvmppc_tce_iommu_unmap(vcpu->kvm,
                                        stit->tbl, entry + i);

                        if (ret == H_SUCCESS)
                                continue;

                        if (ret == H_TOO_HARD)
                                return ret;

                        WARN_ON_ONCE(1);
                        /* Clear the entry that actually failed, not the first one */
                        kvmppc_clear_tce(stit->tbl, entry + i);
                }
        }

        for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
                kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);