/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2016 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>

static void mark_pages_dirty(struct kvm *kvm, struct kvm_memory_slot *memslot,
			     unsigned long gfn, unsigned int order);

/*
 * Supported radix tree geometry.
 * Like p9, we support either 5 or 9 bits at the first (lowest) level,
 * for a page size of 64k or 4k.
 */
static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 };
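
/*
 * Note: the array is indexed by tree level, 0 being the lowest (PTE)
 * level.  With the 13-bit root level required here, a 52-bit address
 * space decomposes as 13 + 9 + 9 bits of index plus either 9 bits of
 * index and a 4k page offset, or 5 bits and a 64k page offset.
 */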

int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			   struct kvmppc_pte *gpte, bool data, bool iswrite)
{
	struct kvm *kvm = vcpu->kvm;
	u32 pid;
	int ret, level, ps;
	__be64 prte, rpte;
	unsigned long ptbl;
	unsigned long root, pte, index;
	unsigned long rts, bits, offset;
	unsigned long gpa;
	unsigned long proc_tbl_size;
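
	/*
	 * The top two bits of the effective address select the radix
	 * translation "quadrant" (see the ISA v3.0 translation rules):
	 * quadrant 0 is translated with the current process PID,
	 * quadrant 3 with PID 0; quadrants 1 and 2 are hypervisor-only,
	 * so anything else is rejected below.
	 */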
	/* Work out effective PID */
	switch (eaddr >> 62) {
	case 0:
		pid = vcpu->arch.pid;
		break;
	case 3:
		pid = 0;
		break;
	default:
		return -EINVAL;
	}
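
	/*
	 * Process-table entries are 16 bytes each, and the PRTS field
	 * encodes the table size as 2^(12 + PRTS) bytes, so the check
	 * below rejects PIDs that fall outside the guest's table.
	 */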
	proc_tbl_size = 1 << ((kvm->arch.process_table & PRTS_MASK) + 12);
	if (pid * 16 >= proc_tbl_size)
		return -EINVAL;

	/* Read the guest's process table to find the root of the tree
	 * for the effective PID */
	ptbl = (kvm->arch.process_table & PRTB_MASK) + (pid * 16);
	ret = kvm_read_guest(kvm, ptbl, &prte, sizeof(prte));
	if (ret)
		return ret;

	root = be64_to_cpu(prte);
	rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
		((root & RTS2_MASK) >> RTS2_SHIFT);
	bits = root & RPDS_MASK;
	root = root & RPDB_MASK;
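
	/*
	 * RTS (radix tree size) is split across two fields of the
	 * entry; the tree maps a 2^(RTS + 31) byte space, so RTS = 21
	 * corresponds to the 52-bit space checked for below.
	 */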
	/* P9 DD1 interprets RTS (radix tree size) differently */
	offset = rts + 31;
	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		offset -= 3;

	/* current implementations only support 52-bit space */
	if (offset != 52)
		return -EINVAL;

	for (level = 3; level >= 0; --level) {
		if (level && bits != p9_supported_radix_bits[level])
			return -EINVAL;
		if (level == 0 && !(bits == 5 || bits == 9))
			return -EINVAL;
		offset -= bits;
		index = (eaddr >> offset) & ((1UL << bits) - 1);
		/* check that low bits of page table base are zero */
		if (root & ((1UL << (bits + 3)) - 1))
			return -EINVAL;
		/* read the entry from guest memory */
		ret = kvm_read_guest(kvm, root + index * 8,
				     &rpte, sizeof(rpte));
		if (ret)
			return ret;
		pte = __be64_to_cpu(rpte);
		if (!(pte & _PAGE_PRESENT))
			return -ENOENT;
		/* stop when we reach a leaf (page) entry */
		if (pte & _PAGE_PTE)
			break;
		/* descend: pick up the size and base of the next level */
		bits = pte & RPDS_MASK;
		root = pte & 0x0fffffffffffff00ul;
	}

	/* need a leaf at lowest level; 512GB pages not supported */
	if (level < 0 || level == 3)
		return -EINVAL;

	/* offset is now log base 2 of the page size */
	gpa = pte & 0x01fffffffffff000ul;
	if (gpa & ((1ul << offset) - 1))
		return -EINVAL;
	gpa += eaddr & ((1ul << offset) - 1);
	for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps)
		if (offset == mmu_psize_defs[ps].shift)
			break;
	gpte->page_size = ps;

	gpte->eaddr = eaddr;
	gpte->raddr = gpa;

	/* Work out permissions */
	gpte->may_read = !!(pte & _PAGE_READ);
	gpte->may_write = !!(pte & _PAGE_WRITE);
	gpte->may_execute = !!(pte & _PAGE_EXEC);
	if (kvmppc_get_msr(vcpu) & MSR_PR) {
		if (pte & _PAGE_PRIVILEGED) {
			gpte->may_read = 0;
			gpte->may_write = 0;
			gpte->may_execute = 0;
		}
	} else {
		if (!(pte & _PAGE_PRIVILEGED)) {
			/* Check AMR/IAMR to see if strict mode is in force */
			if (vcpu->arch.amr & (1ul << 62))
				gpte->may_read = 0;
			if (vcpu->arch.amr & (1ul << 63))
				gpte->may_write = 0;
			if (vcpu->arch.iamr & (1ul << 62))
				gpte->may_execute = 0;
		}
	}

	return 0;
}

#ifdef CONFIG_PPC_64K_PAGES
#define MMU_BASE_PSIZE	MMU_PAGE_64K
#else
#define MMU_BASE_PSIZE	MMU_PAGE_4K
#endif

static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
				    unsigned int pshift)
{
	int psize = MMU_BASE_PSIZE;

	if (pshift >= PMD_SHIFT)
		psize = MMU_PAGE_2M;
	addr &= ~0xfffUL;
	addr |= mmu_psize_defs[psize].ap << 5;
	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
		     : : "r" (addr), "r" (kvm->arch.lpid) : "memory");
	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG))
		asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
			     : : "r" (addr), "r" (kvm->arch.lpid) : "memory");
	asm volatile("ptesync": : :"memory");
}
unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
				      unsigned long clr, unsigned long set,
				      unsigned long addr, unsigned int shift)
{
	unsigned long old = 0;

	if (!(clr & _PAGE_PRESENT) && cpu_has_feature(CPU_FTR_POWER9_DD1) &&
	    pte_present(*ptep)) {
		/* have to invalidate it first */
		old = __radix_pte_update(ptep, _PAGE_PRESENT, 0);
		kvmppc_radix_tlbie_page(kvm, addr, shift);
		set |= _PAGE_PRESENT;
		old &= _PAGE_PRESENT;
	}
	return __radix_pte_update(ptep, clr, set) | old;
}

void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
			     pte_t *ptep, pte_t pte)
{
	radix__set_pte_at(kvm->mm, addr, ptep, pte, 0);
}

static struct kmem_cache *kvm_pte_cache;

static pte_t *kvmppc_pte_alloc(void)
{
	return kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
}

static void kvmppc_pte_free(pte_t *ptep)
{
	kmem_cache_free(kvm_pte_cache, ptep);
}
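
/*
 * Note on kvm_pte_cache above: a radix PTE page can be smaller than
 * PAGE_SIZE (for example on a 64k-page kernel), so a dedicated slab
 * avoids using a whole page for each guest PTE page.
 */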

/* Like pmd_huge() and pmd_large(), but works regardless of config options */
static inline int pmd_is_leaf(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PTE);
}
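
/*
 * Insert "pte" into the guest's second-level (partition-scoped) tree
 * at "gpa", allocating intermediate levels as needed.  Returns 0 on
 * success, -EAGAIN if an MMU-notifier invalidation raced with us,
 * -ENOMEM on allocation failure, and -EBUSY when a large page was
 * requested but a page-table page already covers the range.
 */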
static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
			     unsigned int level, unsigned long mmu_seq)
{
	pgd_t *pgd;
	pud_t *pud, *new_pud = NULL;
	pmd_t *pmd, *new_pmd = NULL;
	pte_t *ptep, *new_ptep = NULL;
	unsigned long old;
	int ret;

	/* Traverse the guest's 2nd-level tree, allocate new levels needed */
	pgd = kvm->arch.pgtable + pgd_index(gpa);
	pud = NULL;
	if (pgd_present(*pgd))
		pud = pud_offset(pgd, gpa);
	else
		new_pud = pud_alloc_one(kvm->mm, gpa);

	pmd = NULL;
	if (pud && pud_present(*pud))
		pmd = pmd_offset(pud, gpa);
	else
		new_pmd = pmd_alloc_one(kvm->mm, gpa);

	if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
		new_ptep = kvmppc_pte_alloc();

	/* Check if we might have been invalidated; let the guest retry if so */
	spin_lock(&kvm->mmu_lock);
	ret = -EAGAIN;
	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;

	/* Now traverse again under the lock and change the tree */
	ret = -ENOMEM;
	if (pgd_none(*pgd)) {
		if (!new_pud)
			goto out_unlock;
		pgd_populate(kvm->mm, pgd, new_pud);
		new_pud = NULL;
	}
	pud = pud_offset(pgd, gpa);
	if (pud_none(*pud)) {
		if (!new_pmd)
			goto out_unlock;
		pud_populate(kvm->mm, pud, new_pmd);
		new_pmd = NULL;
	}
	pmd = pmd_offset(pud, gpa);
	if (pmd_is_leaf(*pmd)) {
		unsigned long lgpa = gpa & PMD_MASK;

		/*
		 * If we raced with another CPU which has just put
		 * a 2MB pte in after we saw a pte page, try again.
		 */
		if (level == 0 && !new_ptep) {
			ret = -EAGAIN;
			goto out_unlock;
		}
		/* Valid 2MB page here already, remove it */
		old = kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
					      ~0UL, 0, lgpa, PMD_SHIFT);
		kvmppc_radix_tlbie_page(kvm, lgpa, PMD_SHIFT);
		if (old & _PAGE_DIRTY) {
			unsigned long gfn = lgpa >> PAGE_SHIFT;
			struct kvm_memory_slot *memslot;

			memslot = gfn_to_memslot(kvm, gfn);
			if (memslot && memslot->dirty_bitmap)
				mark_pages_dirty(kvm, memslot, gfn,
						 PMD_SHIFT - PAGE_SHIFT);
		}
	} else if (level == 1 && !pmd_none(*pmd)) {
		/*
		 * There's a page table page here, but we wanted
		 * to install a large page.  Tell the caller and let
		 * it try installing a normal page if it wants.
		 */
		ret = -EBUSY;
		goto out_unlock;
	}
	if (level == 0) {
		if (pmd_none(*pmd)) {
			if (!new_ptep)
				goto out_unlock;
			pmd_populate(kvm->mm, pmd, new_ptep);
			new_ptep = NULL;
		}
		ptep = pte_offset_kernel(pmd, gpa);
		if (pte_present(*ptep)) {
			/* PTE was previously valid, so invalidate it */
			old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_PRESENT,
						      0, gpa, 0);
			kvmppc_radix_tlbie_page(kvm, gpa, 0);
			if (old & _PAGE_DIRTY)
				mark_page_dirty(kvm, gpa >> PAGE_SHIFT);
		}
		kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
	} else {
		kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
	}
	ret = 0;

 out_unlock:
	spin_unlock(&kvm->mmu_lock);
	if (new_pud)
		pud_free(kvm->mm, new_pud);
	if (new_pmd)
		pmd_free(kvm->mm, new_pmd);
	if (new_ptep)
		kvmppc_pte_free(new_ptep);
	return ret;
}
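
/*
 * Handle a hypervisor page fault for a radix guest: classify the
 * fault, try the fast reference/change-bit update path, otherwise
 * look up (or fault in) the backing page and insert a PTE for it
 * into the second-level tree.
 */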
int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
				   unsigned long ea, unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long mmu_seq, pte_size;
	unsigned long gpa, gfn, hva, pfn;
	struct kvm_memory_slot *memslot;
	struct page *page = NULL, *pages[1];
	long ret, npages, ok;
	unsigned int writing;
	struct vm_area_struct *vma;
	unsigned long flags;
	pte_t pte, *ptep;
	unsigned long pgflags;
	unsigned int shift, level;

	/* Check for unusual errors */
	if (dsisr & DSISR_UNSUPP_MMU) {
		pr_err("KVM: Got unsupported MMU fault\n");
		return -EFAULT;
	}
	if (dsisr & DSISR_BADACCESS) {
		/* Reflect to the guest as DSI */
		pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
		kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
		return RESUME_GUEST;
	}

	/* Translate the logical address and get the page */
	gpa = vcpu->arch.fault_gpa & ~0xfffUL;
	gpa &= ~0xF000000000000000ul;
	gfn = gpa >> PAGE_SHIFT;
	if (!(dsisr & DSISR_PRTABLE_FAULT))
		gpa |= ea & 0xfff;
	memslot = gfn_to_memslot(kvm, gfn);

	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS |
			     DSISR_SET_RC)) {
			/*
			 * Bad address in guest page table tree, or other
			 * unusual error - reflect it to the guest as DSI.
			 */
			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
			return RESUME_GUEST;
		}
		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
					      dsisr & DSISR_ISSTORE);
	}

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	writing = (dsisr & DSISR_ISSTORE) != 0;
	hva = gfn_to_hva_memslot(memslot, gfn);
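
	/*
	 * A fault with DSISR_SET_RC set means the hardware could not
	 * update the reference/change bits in the second-level PTE by
	 * itself; if the Linux PTE already has the bits we need, we
	 * can just set them on the existing mapping and return.
	 */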
	if (dsisr & DSISR_SET_RC) {
		/*
		 * Need to set an R or C bit in the 2nd-level tables;
		 * if the relevant bits aren't already set in the linux
		 * page tables, fall through to do the gup_fast to
		 * set them in the linux page tables too.
		 */
		ok = 0;
		pgflags = _PAGE_ACCESSED;
		if (writing)
			pgflags |= _PAGE_DIRTY;
		local_irq_save(flags);
		ptep = find_current_mm_pte(current->mm->pgd, hva, NULL, NULL);
		if (ptep) {
			pte = READ_ONCE(*ptep);
			if (pte_present(pte) &&
			    (pte_val(pte) & pgflags) == pgflags)
				ok = 1;
		}
		local_irq_restore(flags);
		if (ok) {
			spin_lock(&kvm->mmu_lock);
			if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
				spin_unlock(&kvm->mmu_lock);
				return RESUME_GUEST;
			}
			/*
			 * We are walking the secondary page table here. We can do this
			 * without disabling irq.
			 */
			ptep = __find_linux_pte(kvm->arch.pgtable,
						gpa, NULL, &shift);
			if (ptep && pte_present(*ptep)) {
				kvmppc_radix_update_pte(kvm, ptep, 0, pgflags,
							gpa, shift);
				spin_unlock(&kvm->mmu_lock);
				return RESUME_GUEST;
			}
			spin_unlock(&kvm->mmu_lock);
		}
	}

	pfn = 0;	/* so the I/O-mapping path below can be checked */
	pte_size = PAGE_SIZE;
	pgflags = _PAGE_READ | _PAGE_EXEC;
	level = 0;
	npages = get_user_pages_fast(hva, 1, writing, pages);
	if (npages < 1) {
		/* Check if it's an I/O mapping */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, hva);
		if (vma && vma->vm_start <= hva && hva < vma->vm_end &&
		    (vma->vm_flags & VM_PFNMAP)) {
			pfn = vma->vm_pgoff +
				((hva - vma->vm_start) >> PAGE_SHIFT);
			pgflags = pgprot_val(vma->vm_page_prot);
		}
		up_read(&current->mm->mmap_sem);
		if (!pfn)
			return -EFAULT;
	} else {
		page = pages[0];
		pfn = page_to_pfn(page);
		if (PageCompound(page)) {
			pte_size <<= compound_order(compound_head(page));
			/* See if we can insert a 2MB large-page PTE here */
			if (pte_size >= PMD_SIZE &&
			    (gpa & (PMD_SIZE - PAGE_SIZE)) ==
			    (hva & (PMD_SIZE - PAGE_SIZE))) {
				level = 1;
				pfn &= ~((PMD_SIZE >> PAGE_SHIFT) - 1);
			}
		}

		/* See if we can provide write access */
		if (writing) {
			pgflags |= _PAGE_WRITE;
		} else {
			local_irq_save(flags);
			ptep = find_current_mm_pte(current->mm->pgd,
						   hva, NULL, NULL);
			if (ptep && pte_write(*ptep))
				pgflags |= _PAGE_WRITE;
			local_irq_restore(flags);
		}
	}

	/*
	 * Compute the PTE value that we need to insert.
	 */
	pgflags |= _PAGE_PRESENT | _PAGE_PTE | _PAGE_ACCESSED;
	if (pgflags & _PAGE_WRITE)
		pgflags |= _PAGE_DIRTY;
	pte = pfn_pte(pfn, __pgprot(pgflags));

	/* Allocate space in the tree and write the PTE */
	ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
	if (ret == -EBUSY) {
		/*
		 * There's already a PMD where we wanted to install a
		 * large page; for now, fall back to installing a small
		 * page.
		 */
		level = 0;
		pfn |= gfn & ((PMD_SIZE >> PAGE_SHIFT) - 1);
		pte = pfn_pte(pfn, __pgprot(pgflags));
		ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
	}

	if (page) {
		if (!ret && (pgflags & _PAGE_WRITE))
			set_page_dirty_lock(page);
		put_page(page);
	}

	if (ret == 0 || ret == -EAGAIN)
		ret = RESUME_GUEST;
	return ret;
}
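
/*
 * Mark an order-aligned run of 2^order guest pages dirty, starting at
 * gfn.  Short runs defer to mark_page_dirty(); once a run covers whole
 * bitmap words they are filled directly.
 */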
static void mark_pages_dirty(struct kvm *kvm, struct kvm_memory_slot *memslot,
			     unsigned long gfn, unsigned int order)
{
	unsigned long i, limit;
	unsigned long *dp;

	if (!memslot->dirty_bitmap)
		return;
	limit = 1ul << order;
	if (limit < BITS_PER_LONG) {
		for (i = 0; i < limit; ++i)
			mark_page_dirty(kvm, gfn + i);
		return;
	}
	/* word index of the first page in the run */
	dp = memslot->dirty_bitmap +
		(gfn - memslot->base_gfn) / BITS_PER_LONG;
	limit /= BITS_PER_LONG;
	for (i = 0; i < limit; ++i)
		*dp++ = ~0ul;
}

/* Called with kvm->lock held */
int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		    unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;
	unsigned long old;

	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep)) {
		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_PRESENT, 0,
					      gpa, shift);
		kvmppc_radix_tlbie_page(kvm, gpa, shift);
		if (old & _PAGE_DIRTY) {
			if (!shift)
				mark_page_dirty(kvm, gfn);
			else
				mark_pages_dirty(kvm, memslot,
						 gfn, shift - PAGE_SHIFT);
		}
	}
	return 0;
}

/* Called with kvm->lock held */
int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		  unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;
	int ref = 0;

	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
		kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
					gpa, shift);
		/* XXX need to flush tlb here? */
		ref = 1;
	}
	return ref;
}

/* Called with kvm->lock held */
int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		       unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;
	int ref = 0;

	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep) && pte_young(*ptep))
		ref = 1;
	return ref;
}

/* Returns the number of PAGE_SIZE pages that are dirty */
static int kvm_radix_test_clear_dirty(struct kvm *kvm,
				struct kvm_memory_slot *memslot, int pagenum)
{
	unsigned long gfn = memslot->base_gfn + pagenum;
	unsigned long gpa = gfn << PAGE_SHIFT;
	pte_t *ptep;
	unsigned int shift;
	int ret = 0;

	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
		ret = 1;
		if (shift)
			ret = 1 << (shift - PAGE_SHIFT);
		kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
					gpa, shift);
		kvmppc_radix_tlbie_page(kvm, gpa, shift);
	}
	return ret;
}

long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map)
{
	unsigned long i, j;
	unsigned long n, *p;
	int npages;

	/*
	 * Radix accumulates dirty bits in the first half of the
	 * memslot's dirty_bitmap area, for when pages are paged
	 * out or modified by the host directly.  Pick up these
	 * bits and add them to the map.
	 */
	n = kvm_dirty_bitmap_bytes(memslot) / sizeof(long);
	p = memslot->dirty_bitmap;
	for (i = 0; i < n; ++i)
		map[i] |= xchg(&p[i], 0);

	for (i = 0; i < memslot->npages; i = j) {
		npages = kvm_radix_test_clear_dirty(kvm, memslot, i);

		/*
		 * Note that if npages > 0 then i must be a multiple of npages,
		 * since huge pages are only used to back the guest at guest
		 * real addresses that are a multiple of their size.
		 * Since we have at most one PTE covering any given guest
		 * real address, if npages > 1 we can skip to i + npages.
		 */
		j = i + 1;
		if (npages)
			for (j = i; npages; ++j, --npages)
				__set_bit_le(j, map);
	}
	return 0;
}
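
/*
 * Each AP encoding reported to userspace packs the log base 2 of the
 * page size into the low 6 bits and the AP (actual page size) field
 * into the top 3 bits of a 32-bit word, which is what the shift by
 * 29 below achieves.
 */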
static void add_rmmu_ap_encoding(struct kvm_ppc_rmmu_info *info,
				 int psize, int *indexp)
{
	if (!mmu_psize_defs[psize].shift)
		return;
	info->ap_encodings[*indexp] = mmu_psize_defs[psize].shift |
		(mmu_psize_defs[psize].ap << 29);
	++(*indexp);
}

int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info)
{
	int i;

	if (!radix_enabled())
		return -EINVAL;
	memset(info, 0, sizeof(*info));

	/* 4k page size */
	info->geometries[0].page_shift = 12;
	info->geometries[0].level_bits[0] = 9;
	for (i = 1; i < 4; ++i)
		info->geometries[0].level_bits[i] = p9_supported_radix_bits[i];
	/* 64k page size */
	info->geometries[1].page_shift = 16;
	for (i = 0; i < 4; ++i)
		info->geometries[1].level_bits[i] = p9_supported_radix_bits[i];

	i = 0;
	add_rmmu_ap_encoding(info, MMU_PAGE_4K, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_64K, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_2M, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_1G, &i);
	return 0;
}

int kvmppc_init_vm_radix(struct kvm *kvm)
{
	kvm->arch.pgtable = pgd_alloc(kvm->mm);
	if (!kvm->arch.pgtable)
		return -ENOMEM;
	return 0;
}

void kvmppc_free_radix(struct kvm *kvm)
{
	unsigned long ig, iu, im;
	pte_t *pte;
	pmd_t *pmd;
	pud_t *pud;
	pgd_t *pgd;

	if (!kvm->arch.pgtable)
		return;
	pgd = kvm->arch.pgtable;
	for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
		if (!pgd_present(*pgd))
			continue;
		pud = pud_offset(pgd, 0);
		for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++pud) {
			if (!pud_present(*pud))
				continue;
			pmd = pmd_offset(pud, 0);
			for (im = 0; im < PTRS_PER_PMD; ++im, ++pmd) {
				if (pmd_is_leaf(*pmd)) {
					pmd_clear(pmd);
					continue;
				}
				if (!pmd_present(*pmd))
					continue;
				pte = pte_offset_map(pmd, 0);
				memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE);
				kvmppc_pte_free(pte);
				pmd_clear(pmd);
			}
			pmd_free(kvm->mm, pmd_offset(pud, 0));
			pud_clear(pud);
		}
		pud_free(kvm->mm, pud_offset(pgd, 0));
		pgd_clear(pgd);
	}
	pgd_free(kvm->mm, kvm->arch.pgtable);
}

static void pte_ctor(void *addr)
{
	memset(addr, 0, PTE_TABLE_SIZE);
}

int kvmppc_radix_init(void)
{
	unsigned long size = sizeof(void *) << PTE_INDEX_SIZE;

	kvm_pte_cache = kmem_cache_create("kvm-pte", size, size, 0, pte_ctor);
	if (!kvm_pte_cache)
		return -ENOMEM;
	return 0;
}

void kvmppc_radix_exit(void)
{
	kmem_cache_destroy(kvm_pte_cache);
}