// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/iommu.h>
#include <linux/file.h>
#include <linux/mm.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

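/*
 * Number of host pages needed to hold a guest TCE table with
 * @iommu_pages entries of sizeof(u64) each.
 */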
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
	return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}

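/*
 * Total number of pages to account for one TCE table: the
 * kvmppc_spapr_tce_table descriptor (including its pages[] array)
 * plus the TCE table pages themselves.
 */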
static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
	unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
			(tce_pages * sizeof(struct page *));

	return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}

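/* RCU callback: drop the iommu_table reference and free the attachment */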
static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
			struct kvmppc_spapr_tce_iommu_table, rcu);

	iommu_tce_table_put(stit->tbl);

	kfree(stit);
}

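/* kref release: unlink the attachment and free it after an RCU grace period */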
static void kvm_spapr_tce_liobn_put(struct kref *kref)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref,
			struct kvmppc_spapr_tce_iommu_table, kref);

	list_del_rcu(&stit->next);

	call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
}

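/*
 * Called when an IOMMU group is detached from the KVM device: drop the
 * reference of every LIOBN attachment that points to a table of this group.
 */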
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
		struct iommu_group *grp)
{
	int i;
	struct kvmppc_spapr_tce_table *stt;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct iommu_table_group *table_group = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {

		table_group = iommu_group_get_iommudata(grp);
		if (WARN_ON(!table_group))
			continue;

		list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
			for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
				if (table_group->tables[i] != stit->tbl)
					continue;

				kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
			}
		}
	}
	rcu_read_unlock();
}

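/*
 * Attaches an IOMMU group's hardware table to the TCE table identified by
 * @tablefd so that H_PUT_TCE and friends also update real DMA mappings.
 */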
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		struct iommu_group *grp)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	bool found = false;
	struct iommu_table *tbl = NULL;
	struct iommu_table_group *table_group;
	long i;
	struct kvmppc_spapr_tce_iommu_table *stit;
	struct fd f;

	f = fdget(tablefd);
	if (!f.file)
		return -EBADF;

	rcu_read_lock();
	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
		if (stt == f.file->private_data) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	fdput(f);

	if (!found)
		return -EINVAL;

	table_group = iommu_group_get_iommudata(grp);
	if (WARN_ON(!table_group))
		return -EFAULT;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbltmp = table_group->tables[i];

		if (!tbltmp)
			continue;
		/* Make sure hardware table parameters are compatible */
		if ((tbltmp->it_page_shift <= stt->page_shift) &&
				(tbltmp->it_offset << tbltmp->it_page_shift ==
				 stt->offset << stt->page_shift) &&
				(tbltmp->it_size << tbltmp->it_page_shift >=
				 stt->size << stt->page_shift)) {
			/*
			 * Reference the table to avoid races with
			 * add/remove DMA windows.
			 */
			tbl = iommu_tce_table_get(tbltmp);
			break;
		}
	}
	if (!tbl)
		return -EINVAL;

	rcu_read_lock();
	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		if (tbl != stit->tbl)
			continue;

		if (!kref_get_unless_zero(&stit->kref)) {
			/* stit is being destroyed */
			iommu_tce_table_put(tbl);
			rcu_read_unlock();
			return -ENOTTY;
		}
		/*
		 * The table is already known to this KVM, we just increased
		 * its KVM reference counter and can return.
		 */
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	stit = kzalloc(sizeof(*stit), GFP_KERNEL);
	if (!stit) {
		iommu_tce_table_put(tbl);
		return -ENOMEM;
	}

	stit->tbl = tbl;
	kref_init(&stit->kref);

	list_add_rcu(&stit->next, &stt->iommu_tables);

	return 0;
}

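/* RCU callback freeing the guest view of a TCE table and its backing pages */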
static void release_spapr_tce_table(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_table *stt = container_of(head,
			struct kvmppc_spapr_tce_table, rcu);
	unsigned long i, npages = kvmppc_tce_pages(stt->size);

	for (i = 0; i < npages; i++)
		if (stt->pages[i])
			__free_page(stt->pages[i]);

	kfree(stt);
}

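/* Returns the backing page for @sttpage, allocating it lazily under alloc_lock */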
static struct page *kvm_spapr_get_tce_page(struct kvmppc_spapr_tce_table *stt,
		unsigned long sttpage)
{
	struct page *page = stt->pages[sttpage];

	if (page)
		return page;

	mutex_lock(&stt->alloc_lock);
	page = stt->pages[sttpage];
	if (!page) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		WARN_ON_ONCE(!page);
		if (page)
			stt->pages[sttpage] = page;
	}
	mutex_unlock(&stt->alloc_lock);

	return page;
}

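/* Page fault handler for mmap of the TCE table fd: hand out the backing page */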
static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
{
	struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
		return VM_FAULT_SIGBUS;

	page = kvm_spapr_get_tce_page(stt, vmf->pgoff);
	if (!page)
		return VM_FAULT_OOM;

	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
	.fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_spapr_tce_vm_ops;
	return 0;
}

static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_spapr_tce_table *stt = filp->private_data;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct kvm *kvm = stt->kvm;

	mutex_lock(&kvm->lock);
	list_del_rcu(&stt->list);
	mutex_unlock(&kvm->lock);

	list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
		WARN_ON(!kref_read(&stit->kref));
		while (1) {
			if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put))
				break;
		}
	}

	kvm_put_kvm(stt->kvm);

	account_locked_vm(current->mm,
		kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);
	call_rcu(&stt->rcu, release_spapr_tce_table);

	return 0;
}

static const struct file_operations kvm_spapr_tce_fops = {
	.mmap = kvm_spapr_tce_mmap,
	.release = kvm_spapr_tce_release,
};

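/*
 * Creates a TCE table for a LIOBN and returns an anonymous file descriptor
 * which userspace can mmap to read the table back.
 */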
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				   struct kvm_create_spapr_tce_64 *args)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	struct kvmppc_spapr_tce_table *siter;
	unsigned long npages, size = args->size;
	int ret;

	if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
		(args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
		return -EINVAL;

	npages = kvmppc_tce_pages(size);
	ret = account_locked_vm(current->mm, kvmppc_stt_pages(npages), true);
	if (ret)
		return ret;

	ret = -ENOMEM;
	stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
		      GFP_KERNEL);
	if (!stt)
		goto fail_acct;

	stt->liobn = args->liobn;
	stt->page_shift = args->page_shift;
	stt->offset = args->offset;
	stt->size = size;
	stt->kvm = kvm;
	mutex_init(&stt->alloc_lock);
	INIT_LIST_HEAD_RCU(&stt->iommu_tables);

	mutex_lock(&kvm->lock);

	/* Check this LIOBN hasn't been previously allocated */
	ret = 0;
	list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
		if (siter->liobn == args->liobn) {
			ret = -EBUSY;
			break;
		}
	}

	kvm_get_kvm(kvm);
	if (!ret)
		ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
				       stt, O_RDWR | O_CLOEXEC);

	if (ret >= 0)
		list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
	else
		kvm_put_kvm(kvm);

	mutex_unlock(&kvm->lock);

	if (ret >= 0)
		return ret;

	kfree(stt);
fail_acct:
	account_locked_vm(current->mm, kvmppc_stt_pages(npages), false);

	return ret;
}

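/* Translates the guest physical address in a TCE into a userspace address */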
static long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
		unsigned long *ua)
{
	unsigned long gfn = tce >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = search_memslots(kvm_memslots(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

	return 0;
}

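/*
 * Validates a TCE before it is put into the table: checks the guest physical
 * address and, for VFIO-backed tables, that the page is preregistered.
 */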
static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long ua = 0;

	/* Allow userspace to poison TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_TOO_HARD;

	if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))
		return H_TOO_HARD;

	rcu_read_lock();
	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		unsigned long hpa = 0;
		struct mm_iommu_table_group_mem_t *mem;
		long shift = stit->tbl->it_page_shift;

		mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
		if (!mem || mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) {
			rcu_read_unlock();
			return H_TOO_HARD;
		}
	}
	rcu_read_unlock();

	return H_SUCCESS;
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values to the table and expects user space to convert them.
 * Cannot fail so kvmppc_tce_validate must be called before it.
 */
static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;
	unsigned long sttpage;

	idx -= stt->offset;
	sttpage = idx / TCES_PER_PAGE;
	page = stt->pages[sttpage];

	if (!page) {
		/* We allow any TCE, not just with read|write permissions */
		if (!tce)
			return;

		page = kvm_spapr_get_tce_page(stt, sttpage);
		if (!page)
			return;
	}
	tbl = page_to_virt(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}

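/* Clears all hardware TCEs covered by one guest TCE entry */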
static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt,
		struct iommu_table *tbl, unsigned long entry)
{
	unsigned long i;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);

	for (i = 0; i < subpages; ++i) {
		unsigned long hpa = 0;
		enum dma_data_direction dir = DMA_NONE;

		iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir);
	}
}

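/* Drops the "mapped" reference taken on a preregistered memory region */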
static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);

	return H_SUCCESS;
}

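/* Clears one hardware TCE and releases the corresponding host page reference */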
static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (WARN_ON_ONCE(iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa,
					&dir)))
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret != H_SUCCESS)
		iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);

	return ret;
}

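/* Unmaps all hardware TCEs backing one guest TCE entry, then invalidates them */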
static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long i, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0; i < subpages; ++i) {
		ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}

	iommu_tce_kill(tbl, io_entry, subpages);

	return ret;
}

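/* Programs one hardware TCE from a preregistered userspace address */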
long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		/* This only handles v2 IOMMU type, v1 is handled via ioctl() */
		return H_TOO_HARD;

	if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
		return H_TOO_HARD;

	if (mm_iommu_mapped_inc(mem))
		return H_TOO_HARD;

	ret = iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);
	if (WARN_ON_ONCE(ret)) {
		mm_iommu_mapped_dec(mem);
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = cpu_to_be64(ua);

	return 0;
}

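/* Maps all hardware TCEs backing one guest TCE entry, then invalidates them */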
static long kvmppc_tce_iommu_map(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0, pgoff = 0; i < subpages;
			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

		ret = kvmppc_tce_iommu_do_map(kvm, tbl,
				io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}

	iommu_tce_kill(tbl, io_entry, subpages);

	return ret;
}

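/* Handler for the H_PUT_TCE hypercall: sets a single TCE entry */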
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret, idx;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	    liobn, ioba, tce); */

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvmppc_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		goto unlock_exit;

	dir = iommu_tce_direction(tce);

	if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
		ret = H_PARAMETER;
		goto unlock_exit;
	}

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry);
		else
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
					entry, ua, dir);

		if (ret != H_SUCCESS) {
			kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);
			goto unlock_exit;
		}
	}

	kvmppc_tce_put(stt, entry, tce);

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);

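/* Handler for H_PUT_TCE_INDIRECT: sets up to 512 TCEs from a guest-provided list */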
long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS, idx;
	unsigned long entry, ua = 0;
	u64 __user *tces;
	u64 tce;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * SPAPR spec says that the maximum size of the list is 512 TCEs
	 * so the whole table fits in 4K page
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua)) {
		ret = H_TOO_HARD;
		goto unlock_exit;
	}
	tces = (u64 __user *) ua;

	for (i = 0; i < npages; ++i) {
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
		tce = be64_to_cpu(tce);

		ret = kvmppc_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;
	}

	for (i = 0; i < npages; ++i) {
		/*
		 * This looks unsafe, because we validate, then regrab
		 * the TCE from userspace which could have been changed by
		 * an evil guest.
		 *
		 * But it actually is safe, because the relevant checks will be
		 * re-executed in the following code.  If userspace tries to
		 * change this dodgily it will result in a messier failure mode
		 * but won't threaten the host.
		 */
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
		tce = be64_to_cpu(tce);

		if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
			ret = H_PARAMETER;
			goto unlock_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret != H_SUCCESS) {
				kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl,
						entry + i);
				goto unlock_exit;
			}
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);

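/* Handler for H_STUFF_TCE: sets @npages consecutive entries to the same value */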
long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only to allow userspace poison TCE for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stt->page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				return ret;

			WARN_ON_ONCE(1);
			kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);