// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/iommu.h>
#include <linux/file.h>
#include <linux/mm.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

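/*
 * Finds a TCE table descriptor by LIOBN.
 */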
static struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
		unsigned long liobn)
{
	struct kvmppc_spapr_tce_table *stt;

	list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
		if (stt->liobn == liobn)
			return stt;

	return NULL;
}

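/*
 * Converts the guest view of the TCE table (in IOMMU pages) into the number
 * of host pages needed to shadow it; kvmppc_stt_pages() adds the pages taken
 * by the descriptor itself so the total can be charged to locked_vm.
 */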
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
	return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}

static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
	unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
			(tce_pages * sizeof(struct page *));

	return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}

static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
			struct kvmppc_spapr_tce_iommu_table, rcu);

	iommu_tce_table_put(stit->tbl);

	kfree(stit);
}

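/*
 * kref release callback: unlinks the iommu_table reference from the LIOBN
 * and frees it after an RCU grace period.
 */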
static void kvm_spapr_tce_liobn_put(struct kref *kref)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref,
			struct kvmppc_spapr_tce_iommu_table, kref);

	list_del_rcu(&stit->next);

	call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
}

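/*
 * Called when an IOMMU group is detached from the VM: drops the references
 * that this VM's LIOBNs hold on the group's hardware tables.
 */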
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
		struct iommu_group *grp)
{
	int i;
	struct kvmppc_spapr_tce_table *stt;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct iommu_table_group *table_group = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {

		table_group = iommu_group_get_iommudata(grp);
		if (WARN_ON(!table_group))
			continue;

		list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
			for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
				if (table_group->tables[i] != stit->tbl)
					continue;

				kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
			}
		}
	}
	rcu_read_unlock();
}

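/*
 * Attaches a hardware iommu_table from @grp to the TCE table identified by
 * @tablefd: the group must expose a DMA window whose page size, offset and
 * size are compatible with the guest view; the match is refcounted so the
 * same table may be attached through several groups.
 */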
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		struct iommu_group *grp)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	bool found = false;
	struct iommu_table *tbl = NULL;
	struct iommu_table_group *table_group;
	long i;
	struct kvmppc_spapr_tce_iommu_table *stit;
	struct fd f;

	f = fdget(tablefd);
	if (!f.file)
		return -EBADF;

	rcu_read_lock();
	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
		if (stt == f.file->private_data) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	fdput(f);

	if (!found)
		return -EINVAL;

	table_group = iommu_group_get_iommudata(grp);
	if (WARN_ON(!table_group))
		return -EFAULT;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbltmp = table_group->tables[i];

		if (!tbltmp)
			continue;
		/* Make sure hardware table parameters are compatible */
		if ((tbltmp->it_page_shift <= stt->page_shift) &&
				(tbltmp->it_offset << tbltmp->it_page_shift ==
				 stt->offset << stt->page_shift) &&
				(tbltmp->it_size << tbltmp->it_page_shift >=
				 stt->size << stt->page_shift)) {
			/*
			 * Reference the table to avoid races with
			 * add/remove DMA windows.
			 */
			tbl = iommu_tce_table_get(tbltmp);
			break;
		}
	}
	if (!tbl)
		return -EINVAL;

	rcu_read_lock();
	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		if (tbl != stit->tbl)
			continue;

		if (!kref_get_unless_zero(&stit->kref)) {
			/* stit is being destroyed */
			iommu_tce_table_put(tbl);
			rcu_read_unlock();
			return -ENOTTY;
		}
		/*
		 * The table is already known to this KVM, we just increased
		 * its KVM reference counter and can return.
		 */
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	stit = kzalloc(sizeof(*stit), GFP_KERNEL);
	if (!stit) {
		iommu_tce_table_put(tbl);
		return -ENOMEM;
	}

	stit->tbl = tbl;
	kref_init(&stit->kref);

	list_add_rcu(&stit->next, &stt->iommu_tables);

	return 0;
}

static void release_spapr_tce_table(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_table *stt = container_of(head,
			struct kvmppc_spapr_tce_table, rcu);
	unsigned long i, npages = kvmppc_tce_pages(stt->size);

	for (i = 0; i < npages; i++)
		if (stt->pages[i])
			__free_page(stt->pages[i]);

	kfree(stt);
}

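/*
 * Returns the host page backing a given page of the shadow TCE table,
 * allocating it on first use; stt->alloc_lock serialises the allocation.
 */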
static struct page *kvm_spapr_get_tce_page(struct kvmppc_spapr_tce_table *stt,
		unsigned long sttpage)
{
	struct page *page = stt->pages[sttpage];

	if (page)
		return page;

	mutex_lock(&stt->alloc_lock);
	page = stt->pages[sttpage];
	if (!page) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		WARN_ON_ONCE(!page);
		if (page)
			stt->pages[sttpage] = page;
	}
	mutex_unlock(&stt->alloc_lock);

	return page;
}

static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
{
	struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
		return VM_FAULT_SIGBUS;

	page = kvm_spapr_get_tce_page(stt, vmf->pgoff);
	if (!page)
		return VM_FAULT_OOM;

	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
	.fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_spapr_tce_vm_ops;
	return 0;
}

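/*
 * Called when the last reference to the TCE table fd is dropped: unlinks the
 * table from the VM, releases the attached iommu_tables, returns the
 * locked_vm accounting and frees the shadow table after an RCU grace period.
 */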
static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_spapr_tce_table *stt = filp->private_data;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct kvm *kvm = stt->kvm;

	mutex_lock(&kvm->lock);
	list_del_rcu(&stt->list);
	mutex_unlock(&kvm->lock);

	list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
		WARN_ON(!kref_read(&stit->kref));
		while (1) {
			if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put))
				break;
		}
	}

	account_locked_vm(kvm->mm,
		kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);

	kvm_put_kvm(stt->kvm);

	call_rcu(&stt->rcu, release_spapr_tce_table);

	return 0;
}

static const struct file_operations kvm_spapr_tce_fops = {
	.mmap		= kvm_spapr_tce_mmap,
	.release	= kvm_spapr_tce_release,
};

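/*
 * Handler for KVM_CREATE_SPAPR_TCE_64: charges the shadow table to locked_vm,
 * allocates the descriptor, checks the LIOBN is not already taken and returns
 * an anonymous fd that userspace can mmap() to read the guest TCEs.
 */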
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				   struct kvm_create_spapr_tce_64 *args)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	struct kvmppc_spapr_tce_table *siter;
	struct mm_struct *mm = kvm->mm;
	unsigned long npages;
	int ret;

	if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
		(args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
		return -EINVAL;

	npages = kvmppc_tce_pages(args->size);
	ret = account_locked_vm(mm, kvmppc_stt_pages(npages), true);
	if (ret)
		return ret;

	ret = -ENOMEM;
	stt = kzalloc(struct_size(stt, pages, npages), GFP_KERNEL | __GFP_NOWARN);
	if (!stt)
		goto fail_acct;

	stt->liobn = args->liobn;
	stt->page_shift = args->page_shift;
	stt->offset = args->offset;
	stt->size = args->size;
	stt->kvm = kvm;
	mutex_init(&stt->alloc_lock);
	INIT_LIST_HEAD_RCU(&stt->iommu_tables);

	mutex_lock(&kvm->lock);

	/* Check this LIOBN hasn't been previously allocated */
	ret = 0;
	list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
		if (siter->liobn == args->liobn) {
			ret = -EBUSY;
			break;
		}
	}

	kvm_get_kvm(kvm);
	if (!ret)
		ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
				stt, O_RDWR | O_CLOEXEC);

	if (ret >= 0)
		list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
	else
		kvm_put_kvm_no_destroy(kvm);

	mutex_unlock(&kvm->lock);

	if (ret >= 0)
		return ret;

	kfree(stt);
fail_acct:
	account_locked_vm(mm, kvmppc_stt_pages(npages), false);
	return ret;
}

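/*
 * Translates a guest TCE (guest physical address plus permission bits) into
 * the userspace address that maps that guest page.
 */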
static long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
		unsigned long *ua)
{
	unsigned long gfn = tce >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

	return 0;
}

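/*
 * Validates a guest TCE before it is written to the shadow table: a TCE with
 * no access bits is always allowed (userspace may poison the table), anything
 * else must reference guest memory that is preregistered for every attached
 * hardware IOMMU table.
 */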
static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long ua = 0;

	/* Allow userspace to poison TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_TOO_HARD;

	if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))
		return H_TOO_HARD;

	rcu_read_lock();
	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		unsigned long hpa = 0;
		struct mm_iommu_table_group_mem_t *mem;
		long shift = stit->tbl->it_page_shift;

		mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
		if (!mem || mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) {
			rcu_read_unlock();
			return H_TOO_HARD;
		}
	}
	rcu_read_unlock();

	return H_SUCCESS;
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values to the table and expects user space to convert them.
 * Cannot fail so kvmppc_tce_validate must be called before it.
 */
static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;
	unsigned long sttpage;

	idx -= stt->offset;
	sttpage = idx / TCES_PER_PAGE;
	page = stt->pages[sttpage];

	if (!page) {
		/* We allow any TCE, not just with read|write permissions */
		if (!tce)
			return;

		page = kvm_spapr_get_tce_page(stt, sttpage);
		if (!page)
			return;
	}
	tbl = page_to_virt(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}

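/*
 * Clears the hardware TCE(s) backing one guest entry so a failed map/unmap
 * request does not leave a stale translation in the hardware table.
 */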
static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt,
		struct iommu_table *tbl, unsigned long entry)
{
	unsigned long i;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);

	for (i = 0; i < subpages; ++i) {
		unsigned long hpa = 0;
		enum dma_data_direction dir = DMA_NONE;

		iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir);
	}
}

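/*
 * Drops the "mapped" reference that a hardware TCE holds on the preregistered
 * memory region and clears the stored userspace address for the entry.
 */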
static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		return H_SUCCESS;

	mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);

	return H_SUCCESS;
}

static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (WARN_ON_ONCE(iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa,
					&dir)))
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret != H_SUCCESS)
		iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);

	return ret;
}

static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long i, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0; i < subpages; ++i) {
		ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}

	iommu_tce_kill(tbl, io_entry, subpages);

	return ret;
}

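/*
 * Maps one hardware IOMMU page: translates the userspace address through the
 * preregistered memory list, takes a "mapped" reference and programs the HPA
 * into the hardware table, remembering the UA for a later unmap.
 */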
static long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		/* This only handles v2 IOMMU type, v1 is handled via ioctl() */
		return H_TOO_HARD;

	if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
		return H_TOO_HARD;

	if (mm_iommu_mapped_inc(mem))
		return H_TOO_HARD;

	ret = iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);
	if (WARN_ON_ONCE(ret)) {
		mm_iommu_mapped_dec(mem);
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = cpu_to_be64(ua);

	return 0;
}

static long kvmppc_tce_iommu_map(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0, pgoff = 0; i < subpages;
			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

		ret = kvmppc_tce_iommu_do_map(kvm, tbl,
				io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}

	iommu_tce_kill(tbl, io_entry, subpages);

	return ret;
}

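/*
 * H_PUT_TCE hypercall: validates the request, updates every attached hardware
 * table and then stores the TCE into the shadow table for userspace.
 */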
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret, idx;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	    liobn, ioba, tce); */

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvmppc_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		goto unlock_exit;

	dir = iommu_tce_direction(tce);

	if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
		ret = H_PARAMETER;
		goto unlock_exit;
	}

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry);
		else
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
					entry, ua, dir);

		if (ret != H_SUCCESS) {
			kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);
			goto unlock_exit;
		}
	}

	kvmppc_tce_put(stt, entry, tce);

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);

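/*
 * H_PUT_TCE_INDIRECT hypercall: reads up to 512 TCEs from a guest page and
 * applies each of them like H_PUT_TCE does.
 */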
long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS, idx;
	unsigned long entry, ua = 0;
	u64 __user *tces;
	u64 tce;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * SPAPR spec says that the maximum size of the list is 512 TCEs
	 * so the whole table fits in 4K page
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua)) {
		ret = H_TOO_HARD;
		goto unlock_exit;
	}
	tces = (u64 __user *) ua;

	for (i = 0; i < npages; ++i) {
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
		tce = be64_to_cpu(tce);

		ret = kvmppc_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;
	}

	for (i = 0; i < npages; ++i) {
		/*
		 * This looks unsafe, because we validate, then regrab
		 * the TCE from userspace which could have been changed by
		 * the guest in the meantime.
		 *
		 * But it actually is safe, because the relevant checks will be
		 * re-executed in the following code.  If userspace tries to
		 * change this dodgily it will result in a messier failure mode
		 * but won't threaten the host.
		 */
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
		tce = be64_to_cpu(tce);

		if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
			ret = H_PARAMETER;
			goto unlock_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret != H_SUCCESS) {
				kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl,
						entry + i);
				goto unlock_exit;
			}
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);

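/*
 * H_STUFF_TCE hypercall: writes the same TCE value (which must carry no
 * access bits) to @npages consecutive entries, unmapping them from the
 * attached hardware tables.
 */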
long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only to allow userspace poison TCE for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stt->page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				return ret;

			WARN_ON_ONCE(1);
			kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);

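/*
 * H_GET_TCE hypercall: returns the current TCE for @ioba in GPR4, or zero if
 * the shadow page has not been allocated yet.
 */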
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	unsigned long idx;
	struct page *page;
	u64 *tbl;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = (ioba >> stt->page_shift) - stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	if (!page) {
		vcpu->arch.regs.gpr[4] = 0;
		return H_SUCCESS;
	}
	tbl = (u64 *)page_address(page);

	vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);