// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2016 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/debugfs.h>
#include <linux/pgtable.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include "book3s_hv.h"
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
#include <asm/ultravisor.h>
#include <asm/kvm_book3s_uvmem.h>
#include <asm/plpar_wrappers.h>
#include <asm/firmware.h>
/*
 * Supported radix tree geometry.
 * Like p9, we support either 5 or 9 bits at the first (lowest) level,
 * for a page size of 64k or 4k.
 */
static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 };
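/*
 * Illustrative sketch (helper name is ours, not used by the code below):
 * with the fixed 13/9/9 bits at the three upper levels of a 52-bit tree,
 * the bit count at the lowest level determines the base page shift:
 * 5 bits gives 52 - (13 + 9 + 9) - 5 = 16 (64k pages), 9 bits gives 12 (4k).
 */
static inline unsigned int __maybe_unused
radix_lowest_level_page_shift(unsigned int l0_bits)
{
	return 52 - (13 + 9 + 9) - l0_bits;
}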
unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
					      gva_t eaddr, void *to, void *from,
					      unsigned long n)
{
	int old_pid, old_lpid;
	unsigned long quadrant, ret = n;
	bool is_load = !!to;

	/* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */
	if (kvmhv_on_pseries())
		return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr,
					  (to != NULL) ? __pa(to) : 0,
					  (from != NULL) ? __pa(from) : 0, n);

	if (eaddr & (0xFFFUL << 52))
		return ret;

	quadrant = 1;
	if (!pid)
		quadrant = 2;
	if (is_load)
		from = (void *) (eaddr | (quadrant << 62));
	else
		to = (void *) (eaddr | (quadrant << 62));

	preempt_disable();

	asm volatile("hwsync" ::: "memory");
	isync();
	/* switch the lpid first to avoid running host with unallocated pid */
	old_lpid = mfspr(SPRN_LPID);
	if (old_lpid != lpid)
		mtspr(SPRN_LPID, lpid);
	if (quadrant == 1) {
		old_pid = mfspr(SPRN_PID);
		if (old_pid != pid)
			mtspr(SPRN_PID, pid);
	}
	isync();

	pagefault_disable();
	if (is_load)
		ret = __copy_from_user_inatomic(to, (const void __user *)from, n);
	else
		ret = __copy_to_user_inatomic((void __user *)to, from, n);
	pagefault_enable();

	asm volatile("hwsync" ::: "memory");
	isync();
	/* switch the pid first to avoid running host with unallocated pid */
	if (quadrant == 1 && pid != old_pid)
		mtspr(SPRN_PID, old_pid);
	if (lpid != old_lpid)
		mtspr(SPRN_LPID, old_lpid);
	isync();

	preempt_enable();

	return ret;
}
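/*
 * Illustrative sketch (hypothetical helper, not kernel API): the quadrant
 * access above works by folding the quadrant number into the top two bits
 * of the effective address. With LPIDR/PIDR temporarily set to the guest's
 * values, quadrant 1 translates as the guest process (LPIDR, PIDR) and
 * quadrant 2 as the guest kernel (LPIDR, PID 0).
 */
static inline void * __maybe_unused
quadrant_eaddr_sketch(gva_t eaddr, unsigned long quadrant)
{
	return (void *)(unsigned long)((eaddr & ~(0x3UL << 62)) |
				       (quadrant << 62));
}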
static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
					  void *to, void *from, unsigned long n)
{
	int lpid = vcpu->kvm->arch.lpid;
	int pid = kvmppc_get_pid(vcpu);

	/* This would cause a data segment intr so don't allow the access */
	if (eaddr & (0x3FFUL << 52))
		return -EINVAL;

	/* Should we be using the nested lpid */
	if (vcpu->arch.nested)
		lpid = vcpu->arch.nested->shadow_lpid;

	/* If accessing quadrant 3 then pid is expected to be 0 */
	if (((eaddr >> 62) & 0x3) == 0x3)
		pid = 0;

	eaddr &= ~(0xFFFUL << 52);

	return __kvmhv_copy_tofrom_guest_radix(lpid, pid, eaddr, to, from, n);
}
long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *to,
				 unsigned long n)
{
	long ret;

	ret = kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, to, NULL, n);
	if (ret > 0)
		memset(to + (n - ret), 0, ret);

	return ret;
}

long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *from,
			       unsigned long n)
{
	return kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, NULL, from, n);
}
int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
			       struct kvmppc_pte *gpte, u64 root,
			       u64 *pte_ret_p)
{
	struct kvm *kvm = vcpu->kvm;
	int ret, level, ps;
	unsigned long rts, bits, offset, index;
	u64 pte, base, gpa;
	__be64 rpte;

	rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
		((root & RTS2_MASK) >> RTS2_SHIFT);
	bits = root & RPDS_MASK;
	base = root & RPDB_MASK;

	offset = rts + 31;

	/* Current implementations only support 52-bit space */
	if (offset != 52)
		return -EINVAL;

	/* Walk each level of the radix tree */
	for (level = 3; level >= 0; --level) {
		u64 addr;
		/* Check a valid size */
		if (level && bits != p9_supported_radix_bits[level])
			return -EINVAL;
		if (level == 0 && !(bits == 5 || bits == 9))
			return -EINVAL;
		offset -= bits;
		index = (eaddr >> offset) & ((1UL << bits) - 1);
		/* Check that low bits of page table base are zero */
		if (base & ((1UL << (bits + 3)) - 1))
			return -EINVAL;
		/* Read the entry from guest memory */
		addr = base + (index * sizeof(rpte));

		kvm_vcpu_srcu_read_lock(vcpu);
		ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte));
		kvm_vcpu_srcu_read_unlock(vcpu);
		if (ret) {
			if (pte_ret_p)
				*pte_ret_p = addr;
			return ret;
		}
		pte = __be64_to_cpu(rpte);
		if (!(pte & _PAGE_PRESENT))
			return -ENOENT;
		/* Check if a leaf entry */
		if (pte & _PAGE_PTE)
			break;
		/* Get ready to walk the next level */
		base = pte & RPDB_MASK;
		bits = pte & RPDS_MASK;
	}

	/* Need a leaf at lowest level; 512GB pages not supported */
	if (level < 0 || level == 3)
		return -EINVAL;

	/* We found a valid leaf PTE */
	/* Offset is now log base 2 of the page size */
	gpa = pte & 0x01fffffffffff000ul;
	if (gpa & ((1ul << offset) - 1))
		return -EINVAL;
	gpa |= eaddr & ((1ul << offset) - 1);
	for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps)
		if (offset == mmu_psize_defs[ps].shift)
			break;
	gpte->page_size = ps;
	gpte->page_shift = offset;

	gpte->eaddr = eaddr;
	gpte->raddr = gpa;

	/* Work out permissions */
	gpte->may_read = !!(pte & _PAGE_READ);
	gpte->may_write = !!(pte & _PAGE_WRITE);
	gpte->may_execute = !!(pte & _PAGE_EXEC);

	gpte->rc = pte & (_PAGE_ACCESSED | _PAGE_DIRTY);

	if (pte_ret_p)
		*pte_ret_p = pte;

	return 0;
}
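/*
 * Illustrative sketch (helper name is ours, not kernel API): the RTS field
 * of a radix root entry is split across two bit ranges; the decoded value
 * plus 31 gives the number of EA bits the tree translates (52 on current
 * implementations), exactly as computed at the top of the walker above.
 */
static inline unsigned long __maybe_unused
radix_root_to_ea_bits(u64 root)
{
	unsigned long rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
			    ((root & RTS2_MASK) >> RTS2_SHIFT);

	return rts + 31;
}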
/*
 * Used to walk a partition or process table radix tree in guest memory
 * Note: We exploit the fact that a partition table and a process
 * table have the same layout, a partition-scoped page table and a
 * process-scoped page table have the same layout, and the 2nd
 * doubleword of a partition table entry has the same layout as
 * the PTCR register.
 */
int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
				     struct kvmppc_pte *gpte, u64 table,
				     int table_index, u64 *pte_ret_p)
{
	struct kvm *kvm = vcpu->kvm;
	int ret;
	unsigned long size, ptbl, root;
	struct prtb_entry entry;

	if ((table & PRTS_MASK) > 24)
		return -EINVAL;
	size = 1ul << ((table & PRTS_MASK) + 12);

	/* Is the table big enough to contain this entry? */
	if ((table_index * sizeof(entry)) >= size)
		return -EINVAL;

	/* Read the table to find the root of the radix tree */
	ptbl = (table & PRTB_MASK) + (table_index * sizeof(entry));
	kvm_vcpu_srcu_read_lock(vcpu);
	ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry));
	kvm_vcpu_srcu_read_unlock(vcpu);
	if (ret)
		return ret;

	/* Root is stored in the first double word */
	root = be64_to_cpu(entry.prtb0);

	return kvmppc_mmu_walk_radix_tree(vcpu, eaddr, gpte, root, pte_ret_p);
}
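/*
 * Illustrative sketch (helper name is ours): the PRTS field encodes the
 * process/partition table size as 2^(PRTS + 12) bytes, which is why the
 * bounds check above rejects PRTS values greater than 24 (2^36 bytes).
 */
static inline unsigned long __maybe_unused
prtb_size_bytes(u64 table)
{
	return 1ul << ((table & PRTS_MASK) + 12);
}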
int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			   struct kvmppc_pte *gpte, bool data, bool iswrite)
{
	u32 pid;
	u64 pte;
	int ret;

	/* Work out effective PID */
	switch (eaddr >> 62) {
	case 0:
		pid = kvmppc_get_pid(vcpu);
		break;
	case 3:
		pid = 0;
		break;
	default:
		return -EINVAL;
	}

	ret = kvmppc_mmu_radix_translate_table(vcpu, eaddr, gpte,
				vcpu->kvm->arch.process_table, pid, &pte);
	if (ret)
		return ret;

	/* Check privilege (applies only to process scoped translations) */
	if (kvmppc_get_msr(vcpu) & MSR_PR) {
		if (pte & _PAGE_PRIVILEGED) {
			gpte->may_read = 0;
			gpte->may_write = 0;
			gpte->may_execute = 0;
		}
	} else {
		if (!(pte & _PAGE_PRIVILEGED)) {
			/* Check AMR/IAMR to see if strict mode is in force */
			if (kvmppc_get_amr_hv(vcpu) & (1ul << 62))
				gpte->may_read = 0;
			if (kvmppc_get_amr_hv(vcpu) & (1ul << 63))
				gpte->may_write = 0;
			if (vcpu->arch.iamr & (1ul << 62))
				gpte->may_execute = 0;
		}
	}

	return 0;
}
void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
			     unsigned int pshift, u64 lpid)
{
	unsigned long psize = PAGE_SIZE;
	int psi;
	long rc;
	unsigned long rb;

	if (pshift)
		psize = 1UL << pshift;
	else
		pshift = PAGE_SHIFT;

	addr &= ~(psize - 1);

	if (!kvmhv_on_pseries()) {
		radix__flush_tlb_lpid_page(lpid, addr, psize);
		return;
	}

	psi = shift_to_mmu_psize(pshift);

	if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE)) {
		rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
		rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
					lpid, rb);
	} else {
		rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
					    H_RPTI_TYPE_NESTED |
					    H_RPTI_TYPE_TLB,
					    psize_to_rpti_pgsize(psi),
					    addr, addr + psize);
	}

	if (rc)
		pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
}
static void kvmppc_radix_flush_pwc(struct kvm *kvm, u64 lpid)
{
	long rc;

	if (!kvmhv_on_pseries()) {
		radix__flush_pwc_lpid(lpid);
		return;
	}

	if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE))
		rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
					lpid, TLBIEL_INVAL_SET_LPID);
	else
		rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
					    H_RPTI_TYPE_PWC, H_RPTI_PAGE_ALL,
					    0, -1UL);
	if (rc)
		pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc);
}
static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
					     unsigned long clr, unsigned long set,
					     unsigned long addr, unsigned int shift)
{
	return __radix_pte_update(ptep, clr, set);
}

static void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
				    pte_t *ptep, pte_t pte)
{
	radix__set_pte_at(kvm->mm, addr, ptep, pte, 0);
}
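/*
 * Illustrative sketch (helper name is ours): kvmppc_radix_update_pte()
 * returns the previous PTE value, so a caller can atomically clear an
 * entry (clr = ~0UL) and still examine what was mapped, which is how
 * kvmppc_unmap_pte() below recovers the old HPA and dirty state.
 */
static inline unsigned long __maybe_unused
radix_pte_clear_sketch(struct kvm *kvm, pte_t *ptep, unsigned long gpa,
		       unsigned int shift)
{
	return kvmppc_radix_update_pte(kvm, ptep, ~0UL, 0, gpa, shift);
}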
static struct kmem_cache *kvm_pte_cache;
static struct kmem_cache *kvm_pmd_cache;

static pte_t *kvmppc_pte_alloc(void)
{
	pte_t *pte;

	pte = kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
	/* pmd_populate() will only reference _pa(pte). */
	kmemleak_ignore(pte);

	return pte;
}

static void kvmppc_pte_free(pte_t *ptep)
{
	kmem_cache_free(kvm_pte_cache, ptep);
}

static pmd_t *kvmppc_pmd_alloc(void)
{
	pmd_t *pmd;

	pmd = kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
	/* pud_populate() will only reference _pa(pmd). */
	kmemleak_ignore(pmd);

	return pmd;
}

static void kvmppc_pmd_free(pmd_t *pmdp)
{
	kmem_cache_free(kvm_pmd_cache, pmdp);
}
/* Called with kvm->mmu_lock held */
void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
		      unsigned int shift,
		      const struct kvm_memory_slot *memslot,
		      u64 lpid)
{
	unsigned long old;
	unsigned long gfn = gpa >> PAGE_SHIFT;
	unsigned long page_size = PAGE_SIZE;
	unsigned long hpa;

	old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift);
	kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);

	/* The following only applies to L1 entries */
	if (lpid != kvm->arch.lpid)
		return;

	if (!memslot) {
		memslot = gfn_to_memslot(kvm, gfn);
		if (!memslot)
			return;
	}
	if (shift) { /* 1GB or 2MB page */
		page_size = 1ul << shift;
		if (shift == PMD_SHIFT)
			kvm->stat.num_2M_pages--;
		else if (shift == PUD_SHIFT)
			kvm->stat.num_1G_pages--;
	}

	gpa &= ~(page_size - 1);
	hpa = old & PTE_RPN_MASK;
	kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size);

	if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap)
		kvmppc_update_dirty_map(memslot, gfn, page_size);
}
/*
 * kvmppc_free_p?d are used to free existing page tables, and recursively
 * descend and clear and free children.
 * Callers are responsible for flushing the PWC.
 *
 * When page tables are being unmapped/freed as part of page fault path
 * (full == false), valid ptes are generally not expected; however, there
 * is one situation where they arise, which is when dirty page logging is
 * turned off for a memslot while the VM is running. The new memslot
 * becomes visible to page faults before the memslot commit function
 * gets to flush the memslot, which can lead to a 2MB page mapping being
 * installed for a guest physical address where there are already 64kB
 * (or 4kB) mappings (of sub-pages of the same 2MB page).
 */
static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
				  u64 lpid)
{
	if (full) {
		memset(pte, 0, sizeof(long) << RADIX_PTE_INDEX_SIZE);
	} else {
		pte_t *p = pte;
		unsigned long it;

		for (it = 0; it < PTRS_PER_PTE; ++it, ++p) {
			if (pte_val(*p) == 0)
				continue;
			kvmppc_unmap_pte(kvm, p,
					 pte_pfn(*p) << PAGE_SHIFT,
					 PAGE_SHIFT, NULL, lpid);
		}
	}

	kvmppc_pte_free(pte);
}
static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
				  u64 lpid)
{
	pmd_t *p = pmd;
	unsigned long im;

	for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
		if (!pmd_present(*p))
			continue;
		if (pmd_is_leaf(*p)) {
			if (full) {
				pmd_clear(p);
			} else {
				WARN_ON_ONCE(1);
				kvmppc_unmap_pte(kvm, (pte_t *)p,
					 pte_pfn(*(pte_t *)p) << PAGE_SHIFT,
					 PMD_SHIFT, NULL, lpid);
			}
		} else {
			pte_t *pte;

			pte = pte_offset_kernel(p, 0);
			kvmppc_unmap_free_pte(kvm, pte, full, lpid);
			pmd_clear(p);
		}
	}
	kvmppc_pmd_free(pmd);
}
static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
				  u64 lpid)
{
	pud_t *p = pud;
	unsigned long iu;

	for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
		if (!pud_present(*p))
			continue;
		if (pud_is_leaf(*p)) {
			pud_clear(p);
		} else {
			pmd_t *pmd;

			pmd = pmd_offset(p, 0);
			kvmppc_unmap_free_pmd(kvm, pmd, true, lpid);
			pud_clear(p);
		}
	}
	pud_free(kvm->mm, pud);
}
void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, u64 lpid)
{
	unsigned long ig;

	for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
		p4d_t *p4d = p4d_offset(pgd, 0);
		pud_t *pud;

		if (!p4d_present(*p4d))
			continue;
		pud = pud_offset(p4d, 0);
		kvmppc_unmap_free_pud(kvm, pud, lpid);
		p4d_clear(p4d);
	}
}
void kvmppc_free_radix(struct kvm *kvm)
{
	if (kvm->arch.pgtable) {
		kvmppc_free_pgtable_radix(kvm, kvm->arch.pgtable,
					  kvm->arch.lpid);
		pgd_free(kvm->mm, kvm->arch.pgtable);
		kvm->arch.pgtable = NULL;
	}
}
static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
					      unsigned long gpa, u64 lpid)
{
	pte_t *pte = pte_offset_kernel(pmd, 0);

	/*
	 * Clearing the pmd entry then flushing the PWC ensures that the pte
	 * page will no longer be cached by the MMU, so it can be freed
	 * without flushing the PWC again.
	 */
	pmd_clear(pmd);
	kvmppc_radix_flush_pwc(kvm, lpid);

	kvmppc_unmap_free_pte(kvm, pte, false, lpid);
}
static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
					      unsigned long gpa, u64 lpid)
{
	pmd_t *pmd = pmd_offset(pud, 0);

	/*
	 * Clearing the pud entry then flushing the PWC ensures that the pmd
	 * page and any children pte pages will no longer be cached by the
	 * MMU, so they can be freed without flushing the PWC again.
	 */
	pud_clear(pud);
	kvmppc_radix_flush_pwc(kvm, lpid);

	kvmppc_unmap_free_pmd(kvm, pmd, false, lpid);
}
/*
 * There are a number of bits which may differ between different faults to
 * the same partition scope entry: the RC bits, in the course of cleaning
 * and aging; and the write bit, because either the access could have been
 * upgraded, or a read fault could happen concurrently with a write fault
 * that sets those bits first.
 */
#define PTE_BITS_MUST_MATCH (~(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED))
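/*
 * Illustrative sketch (helper name is ours): two faults racing on the same
 * entry must agree on everything except the RC and write bits; this is the
 * check behind the WARN_ON_ONCE() calls in kvmppc_create_pte() below.
 */
static inline bool __maybe_unused
pte_bits_match_sketch(pte_t a, pte_t b)
{
	return ((pte_val(a) ^ pte_val(b)) & PTE_BITS_MUST_MATCH) == 0;
}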
int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
		      unsigned long gpa, unsigned int level,
		      unsigned long mmu_seq, u64 lpid,
		      unsigned long *rmapp, struct rmap_nested **n_rmap)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud, *new_pud = NULL;
	pmd_t *pmd, *new_pmd = NULL;
	pte_t *ptep, *new_ptep = NULL;
	int ret;

	/* Traverse the guest's 2nd-level tree, allocate new levels needed */
	pgd = pgtable + pgd_index(gpa);
	p4d = p4d_offset(pgd, gpa);

	pud = NULL;
	if (p4d_present(*p4d))
		pud = pud_offset(p4d, gpa);
	else
		new_pud = pud_alloc_one(kvm->mm, gpa);

	pmd = NULL;
	if (pud && pud_present(*pud) && !pud_is_leaf(*pud))
		pmd = pmd_offset(pud, gpa);
	else if (level <= 1)
		new_pmd = kvmppc_pmd_alloc();

	if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
		new_ptep = kvmppc_pte_alloc();

	/* Check if we might have been invalidated; let the guest retry if so */
	spin_lock(&kvm->mmu_lock);
	ret = -EAGAIN;
	if (mmu_invalidate_retry(kvm, mmu_seq))
		goto out_unlock;

	/* Now traverse again under the lock and change the tree */
	ret = -ENOMEM;
	if (p4d_none(*p4d)) {
		if (!new_pud)
			goto out_unlock;
		p4d_populate(kvm->mm, p4d, new_pud);
		new_pud = NULL;
	}
	pud = pud_offset(p4d, gpa);
	if (pud_is_leaf(*pud)) {
		unsigned long hgpa = gpa & PUD_MASK;

		/* Check if we raced and someone else has set the same thing */
		if (level == 2) {
			if (pud_raw(*pud) == pte_raw(pte)) {
				ret = 0;
				goto out_unlock;
			}
			/* Valid 1GB page here already, add our extra bits */
			WARN_ON_ONCE((pud_val(*pud) ^ pte_val(pte)) &
							PTE_BITS_MUST_MATCH);
			kvmppc_radix_update_pte(kvm, (pte_t *)pud,
					      0, pte_val(pte), hgpa, PUD_SHIFT);
			ret = 0;
			goto out_unlock;
		}
		/*
		 * If we raced with another CPU which has just put
		 * a 1GB pte in after we saw a pmd page, try again.
		 */
		if (!new_pmd) {
			ret = -EAGAIN;
			goto out_unlock;
		}
		/* Valid 1GB page here already, remove it */
		kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT, NULL,
				 lpid);
	}
	if (level == 2) {
		if (!pud_none(*pud)) {
			/*
			 * There's a page table page here, but we wanted to
			 * install a large page, so remove and free the page
			 * table page.
			 */
			kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa, lpid);
		}
		kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte);
		if (rmapp && n_rmap)
			kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
		ret = 0;
		goto out_unlock;
	}
	if (pud_none(*pud)) {
		if (!new_pmd)
			goto out_unlock;
		pud_populate(kvm->mm, pud, new_pmd);
		new_pmd = NULL;
	}
	pmd = pmd_offset(pud, gpa);
	if (pmd_is_leaf(*pmd)) {
		unsigned long lgpa = gpa & PMD_MASK;

		/* Check if we raced and someone else has set the same thing */
		if (level == 1) {
			if (pmd_raw(*pmd) == pte_raw(pte)) {
				ret = 0;
				goto out_unlock;
			}
			/* Valid 2MB page here already, add our extra bits */
			WARN_ON_ONCE((pmd_val(*pmd) ^ pte_val(pte)) &
							PTE_BITS_MUST_MATCH);
			kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
					0, pte_val(pte), lgpa, PMD_SHIFT);
			ret = 0;
			goto out_unlock;
		}

		/*
		 * If we raced with another CPU which has just put
		 * a 2MB pte in after we saw a pte page, try again.
		 */
		if (!new_ptep) {
			ret = -EAGAIN;
			goto out_unlock;
		}
		/* Valid 2MB page here already, remove it */
		kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT, NULL,
				 lpid);
	}
	if (level == 1) {
		if (!pmd_none(*pmd)) {
			/*
			 * There's a page table page here, but we wanted to
			 * install a large page, so remove and free the page
			 * table page.
			 */
			kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa, lpid);
		}
		kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
		if (rmapp && n_rmap)
			kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
		ret = 0;
		goto out_unlock;
	}
	if (pmd_none(*pmd)) {
		if (!new_ptep)
			goto out_unlock;
		pmd_populate(kvm->mm, pmd, new_ptep);
		new_ptep = NULL;
	}
	ptep = pte_offset_kernel(pmd, gpa);
	if (pte_present(*ptep)) {
		/* Check if someone else set the same thing */
		if (pte_raw(*ptep) == pte_raw(pte)) {
			ret = 0;
			goto out_unlock;
		}
		/* Valid page here already, add our extra bits */
		WARN_ON_ONCE((pte_val(*ptep) ^ pte_val(pte)) &
							PTE_BITS_MUST_MATCH);
		kvmppc_radix_update_pte(kvm, ptep, 0, pte_val(pte), gpa, 0);
		ret = 0;
		goto out_unlock;
	}
	kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
	if (rmapp && n_rmap)
		kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
	ret = 0;

 out_unlock:
	spin_unlock(&kvm->mmu_lock);
	if (new_pud)
		pud_free(kvm->mm, new_pud);
	if (new_pmd)
		kvmppc_pmd_free(new_pmd);
	if (new_ptep)
		kvmppc_pte_free(new_ptep);
	return ret;
}
bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing,
			     unsigned long gpa, u64 lpid)
{
	unsigned long pgflags;
	unsigned int shift;
	pte_t *ptep;

	/*
	 * Need to set an R or C bit in the 2nd-level tables;
	 * since we are just helping out the hardware here,
	 * it is sufficient to do what the hardware does.
	 */
	pgflags = _PAGE_ACCESSED;
	if (writing)
		pgflags |= _PAGE_DIRTY;

	if (nested)
		ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
	else
		ptep = find_kvm_secondary_pte(kvm, gpa, &shift);

	if (ptep && pte_present(*ptep) && (!writing || pte_write(*ptep))) {
		kvmppc_radix_update_pte(kvm, ptep, 0, pgflags, gpa, shift);
		return true;
	}
	return false;
}
int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
				   unsigned long gpa,
				   struct kvm_memory_slot *memslot,
				   bool writing, bool kvm_ro,
				   pte_t *inserted_pte, unsigned int *levelp)
{
	struct kvm *kvm = vcpu->kvm;
	struct page *page = NULL;
	unsigned long mmu_seq;
	unsigned long hva, gfn = gpa >> PAGE_SHIFT;
	bool upgrade_write = false;
	bool *upgrade_p = &upgrade_write;
	pte_t pte, *ptep;
	unsigned int shift, level;
	int ret;
	bool large_enable;

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_invalidate_seq;
	smp_rmb();

	/*
	 * Do a fast check first, since __gfn_to_pfn_memslot doesn't
	 * do it with !atomic && !async, which is how we call it.
	 * We always ask for write permission since the common case
	 * is that the page is writable.
	 */
	hva = gfn_to_hva_memslot(memslot, gfn);
	if (!kvm_ro && get_user_page_fast_only(hva, FOLL_WRITE, &page)) {
		upgrade_write = true;
	} else {
		kvm_pfn_t pfn;

		/* Call KVM generic code to do the slow-path check */
		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
					   writing, upgrade_p, NULL);
		if (is_error_noslot_pfn(pfn))
			return -EFAULT;
		page = NULL;
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageReserved(page))
				page = NULL;
		}
	}

	/*
	 * Read the PTE from the process' radix tree and use that
	 * so we get the shift and attribute bits.
	 */
	spin_lock(&kvm->mmu_lock);
	ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift);
	pte = __pte(0);
	if (ptep)
		pte = READ_ONCE(*ptep);
	spin_unlock(&kvm->mmu_lock);
	/*
	 * If the PTE disappeared temporarily due to a THP
	 * collapse, just return and let the guest try again.
	 */
	if (!pte_present(pte)) {
		if (page)
			put_page(page);
		return RESUME_GUEST;
	}

	/* If we're logging dirty pages, always map single pages */
	large_enable = !(memslot->flags & KVM_MEM_LOG_DIRTY_PAGES);

	/* Get pte level from shift/size */
	if (large_enable && shift == PUD_SHIFT &&
	    (gpa & (PUD_SIZE - PAGE_SIZE)) ==
	    (hva & (PUD_SIZE - PAGE_SIZE))) {
		level = 2;
	} else if (large_enable && shift == PMD_SHIFT &&
		   (gpa & (PMD_SIZE - PAGE_SIZE)) ==
		   (hva & (PMD_SIZE - PAGE_SIZE))) {
		level = 1;
	} else {
		level = 0;
		if (shift > PAGE_SHIFT) {
			/*
			 * If the pte maps more than one page, bring over
			 * bits from the virtual address to get the real
			 * address of the specific single page we want.
			 */
			unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
			pte = __pte(pte_val(pte) | (hva & rpnmask));
		}
	}

	pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
	if (writing || upgrade_write) {
		if (pte_val(pte) & _PAGE_WRITE)
			pte = __pte(pte_val(pte) | _PAGE_DIRTY);
	} else {
		pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
	}

	/* Allocate space in the tree and write the PTE */
	ret = kvmppc_create_pte(kvm, kvm->arch.pgtable, pte, gpa, level,
				mmu_seq, kvm->arch.lpid, NULL, NULL);
	if (inserted_pte)
		*inserted_pte = pte;
	if (levelp)
		*levelp = level;

	if (page) {
		if (!ret && (pte_val(pte) & _PAGE_WRITE))
			set_page_dirty_lock(page);
		put_page(page);
	}

	/* Increment number of large pages if we (successfully) inserted one */
	if (!ret) {
		if (level == 1)
			kvm->stat.num_2M_pages++;
		else if (level == 2)
			kvm->stat.num_1G_pages++;
	}

	return ret;
}
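/*
 * Illustrative sketch (helper name is ours): when a large host PTE backs a
 * smaller guest mapping, the sub-page offset of the host virtual address is
 * folded into the PTE's real page number to pick out the single page we
 * want, mirroring the rpnmask arithmetic in the function above.
 */
static inline pte_t __maybe_unused
large_pte_to_single_page(pte_t pte, unsigned long hva, unsigned int shift)
{
	unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;

	return __pte(pte_val(pte) | (hva & rpnmask));
}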
int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
				   unsigned long ea, unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long gpa, gfn;
	struct kvm_memory_slot *memslot;
	long ret;
	bool writing = !!(dsisr & DSISR_ISSTORE);
	bool kvm_ro = false;

	/* Check for unusual errors */
	if (dsisr & DSISR_UNSUPP_MMU) {
		pr_err("KVM: Got unsupported MMU fault\n");
		return -EFAULT;
	}
	if (dsisr & DSISR_BADACCESS) {
		/* Reflect to the guest as DSI */
		pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
		kvmppc_core_queue_data_storage(vcpu,
				kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
				ea, dsisr);
		return RESUME_GUEST;
	}

	/* Translate the logical address */
	gpa = vcpu->arch.fault_gpa & ~0xfffUL;
	gpa &= ~0xF000000000000000ul;
	gfn = gpa >> PAGE_SHIFT;
	if (!(dsisr & DSISR_PRTABLE_FAULT))
		gpa |= ea & 0xfff;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return kvmppc_send_page_to_uv(kvm, gfn);

	/* Get the corresponding memslot */
	memslot = gfn_to_memslot(kvm, gfn);

	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS |
			     DSISR_SET_RC)) {
			/*
			 * Bad address in guest page table tree, or other
			 * unusual error - reflect it to the guest as DSI.
			 */
			kvmppc_core_queue_data_storage(vcpu,
					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
					ea, dsisr);
			return RESUME_GUEST;
		}
		return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
	}

	if (memslot->flags & KVM_MEM_READONLY) {
		if (writing) {
			/* give the guest a DSI */
			kvmppc_core_queue_data_storage(vcpu,
					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
					ea, DSISR_ISSTORE | DSISR_PROTFAULT);
			return RESUME_GUEST;
		}
		kvm_ro = true;
	}

	/* Failed to set the reference/change bits */
	if (dsisr & DSISR_SET_RC) {
		spin_lock(&kvm->mmu_lock);
		if (kvmppc_hv_handle_set_rc(kvm, false, writing,
					    gpa, kvm->arch.lpid))
			dsisr &= ~DSISR_SET_RC;
		spin_unlock(&kvm->mmu_lock);

		if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
			       DSISR_PROTFAULT | DSISR_SET_RC)))
			return RESUME_GUEST;
	}

	/* Try to insert a pte */
	ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, writing,
					     kvm_ro, NULL, NULL);

	if (ret == 0 || ret == -EAGAIN)
		ret = RESUME_GUEST;
	return ret;
}
/* Called with kvm->mmu_lock held */
void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		     unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) {
		uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT);
		return;
	}

	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
	if (ptep && pte_present(*ptep))
		kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
				 kvm->arch.lpid);
}
/* Called with kvm->mmu_lock held */
bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		   unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;
	bool ref = false;
	unsigned long old, *rmapp;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return ref;

	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
	if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
					      gpa, shift);
		/* XXX need to flush tlb here? */
		/* Also clear bit in ptes in shadow pgtable for nested guests */
		rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
		kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_ACCESSED, 0,
					       old & PTE_RPN_MASK,
					       1UL << shift);
		ref = true;
	}
	return ref;
}
/* Called with kvm->mmu_lock held */
bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;
	bool ref = false;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return ref;

	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
	if (ptep && pte_present(*ptep) && pte_young(*ptep))
		ref = true;
	return ref;
}
/* Returns the number of PAGE_SIZE pages that are dirty */
static int kvm_radix_test_clear_dirty(struct kvm *kvm,
				struct kvm_memory_slot *memslot, int pagenum)
{
	unsigned long gfn = memslot->base_gfn + pagenum;
	unsigned long gpa = gfn << PAGE_SHIFT;
	pte_t *ptep, pte;
	unsigned int shift;
	int ret = 0;
	unsigned long old, *rmapp;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return ret;

	/*
	 * For performance reasons we don't hold kvm->mmu_lock while walking the
	 * partition scoped table.
	 */
	ptep = find_kvm_secondary_pte_unlocked(kvm, gpa, &shift);
	if (!ptep)
		return 0;

	pte = READ_ONCE(*ptep);
	if (pte_present(pte) && pte_dirty(pte)) {
		spin_lock(&kvm->mmu_lock);
		/*
		 * Recheck the pte again
		 */
		if (pte_val(pte) != pte_val(*ptep)) {
			/*
			 * We have KVM_MEM_LOG_DIRTY_PAGES enabled. Hence we can
			 * only find PAGE_SIZE pte entries here. We can continue
			 * to use the pte addr returned by above page table
			 * walk.
			 */
			if (!pte_present(*ptep) || !pte_dirty(*ptep)) {
				spin_unlock(&kvm->mmu_lock);
				return 0;
			}
		}

		ret = 1;
		if (shift)
			ret = 1 << (shift - PAGE_SHIFT);
		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
					      gpa, shift);
		kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid);
		/* Also clear bit in ptes in shadow pgtable for nested guests */
		rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
		kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_DIRTY, 0,
					       old & PTE_RPN_MASK,
					       1UL << shift);
		spin_unlock(&kvm->mmu_lock);
	}
	return ret;
}
long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map)
{
	unsigned long i, j;
	int npages;

	for (i = 0; i < memslot->npages; i = j) {
		npages = kvm_radix_test_clear_dirty(kvm, memslot, i);

		/*
		 * Note that if npages > 0 then i must be a multiple of npages,
		 * since huge pages are only used to back the guest at guest
		 * real addresses that are a multiple of their size.
		 * Since we have at most one PTE covering any given guest
		 * real address, if npages > 1 we can skip to i + npages.
		 */
		j = i + 1;
		if (npages) {
			set_dirty_bits(map, i, npages);
			j = i + npages;
		}
	}
	return 0;
}
void kvmppc_radix_flush_memslot(struct kvm *kvm,
				const struct kvm_memory_slot *memslot)
{
	unsigned long n;
	pte_t *ptep;
	unsigned long gpa;
	unsigned int shift;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)
		kvmppc_uvmem_drop_pages(memslot, kvm, true);

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return;

	gpa = memslot->base_gfn << PAGE_SHIFT;
	spin_lock(&kvm->mmu_lock);
	for (n = memslot->npages; n; --n) {
		ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
		if (ptep && pte_present(*ptep))
			kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
					 kvm->arch.lpid);
		gpa += PAGE_SIZE;
	}
	/*
	 * Increase the mmu notifier sequence number to prevent any page
	 * fault that read the memslot earlier from writing a PTE.
	 */
	kvm->mmu_invalidate_seq++;
	spin_unlock(&kvm->mmu_lock);
}
static void add_rmmu_ap_encoding(struct kvm_ppc_rmmu_info *info,
				 int psize, int *indexp)
{
	if (!mmu_psize_defs[psize].shift)
		return;
	info->ap_encodings[*indexp] = mmu_psize_defs[psize].shift |
		(mmu_psize_defs[psize].ap << 29);
	(*indexp)++;
}
int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info)
{
	int i;

	if (!radix_enabled())
		return -EINVAL;
	memset(info, 0, sizeof(*info));

	/* 4k page size */
	info->geometries[0].page_shift = 12;
	info->geometries[0].level_bits[0] = 9;
	for (i = 1; i < 4; ++i)
		info->geometries[0].level_bits[i] = p9_supported_radix_bits[i];
	/* 64k page size */
	info->geometries[1].page_shift = 16;
	for (i = 0; i < 4; ++i)
		info->geometries[1].level_bits[i] = p9_supported_radix_bits[i];

	i = 0;
	add_rmmu_ap_encoding(info, MMU_PAGE_4K, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_64K, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_2M, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_1G, &i);

	return 0;
}
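/*
 * Illustrative sketch (hypothetical helper, not part of the uapi): an
 * ap_encodings[] word built by add_rmmu_ap_encoding() above carries the
 * page shift in the low bits and the 3-bit AP field at bit 29; this is
 * the decode in the other direction.
 */
static inline void __maybe_unused
rmmu_ap_decode_sketch(u32 enc, unsigned int *shift, unsigned int *ap)
{
	*shift = enc & 0x3f;		/* page shift, e.g. 12, 16, 21, 30 */
	*ap = (enc >> 29) & 0x7;	/* "actual page size" encoding */
}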
int kvmppc_init_vm_radix(struct kvm *kvm)
{
	kvm->arch.pgtable = pgd_alloc(kvm->mm);
	if (!kvm->arch.pgtable)
		return -ENOMEM;
	return 0;
}
static void pte_ctor(void *addr)
{
	memset(addr, 0, RADIX_PTE_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
	memset(addr, 0, RADIX_PMD_TABLE_SIZE);
}

struct debugfs_radix_state {
	struct kvm	*kvm;
	struct mutex	mutex;
	unsigned long	gpa;
	int		lpid;
	int		chars_left;
	int		buf_index;
	char		buf[128];
	u8		hdr;
};
static int debugfs_radix_open(struct inode *inode, struct file *file)
{
	struct kvm *kvm = inode->i_private;
	struct debugfs_radix_state *p;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	kvm_get_kvm(kvm);
	p->kvm = kvm;
	mutex_init(&p->mutex);
	file->private_data = p;

	return nonseekable_open(inode, file);
}
static int debugfs_radix_release(struct inode *inode, struct file *file)
{
	struct debugfs_radix_state *p = file->private_data;

	kvm_put_kvm(p->kvm);
	kfree(p);
	return 0;
}
static ssize_t debugfs_radix_read(struct file *file, char __user *buf,
				  size_t len, loff_t *ppos)
{
	struct debugfs_radix_state *p = file->private_data;
	ssize_t ret, r;
	unsigned long n;
	struct kvm *kvm;
	unsigned long gpa;
	pgd_t *pgt;
	struct kvm_nested_guest *nested;
	pgd_t *pgdp;
	p4d_t p4d, *p4dp;
	pud_t pud, *pudp;
	pmd_t pmd, *pmdp;
	pte_t *ptep;
	int shift;
	unsigned long pte;

	kvm = p->kvm;
	if (!kvm_is_radix(kvm))
		return 0;

	ret = mutex_lock_interruptible(&p->mutex);
	if (ret)
		return ret;

	if (p->chars_left) {
		n = p->chars_left;
		if (n > len)
			n = len;
		r = copy_to_user(buf, p->buf + p->buf_index, n);
		n -= r;
		p->chars_left -= n;
		p->buf_index += n;
		buf += n;
		len -= n;
		ret = n;
		if (r) {
			if (!n)
				ret = -EFAULT;
			goto out;
		}
	}

	gpa = p->gpa;
	nested = NULL;
	pgt = NULL;
	while (len != 0 && p->lpid >= 0) {
		if (gpa >= RADIX_PGTABLE_RANGE) {
			gpa = 0;
			pgt = NULL;
			if (nested) {
				kvmhv_put_nested(nested);
				nested = NULL;
			}
			p->lpid = kvmhv_nested_next_lpid(kvm, p->lpid);
			p->hdr = 0;
			if (p->lpid < 0)
				break;
		}
		if (!pgt) {
			if (p->lpid == 0) {
				pgt = kvm->arch.pgtable;
			} else {
				nested = kvmhv_get_nested(kvm, p->lpid, false);
				if (!nested) {
					gpa = RADIX_PGTABLE_RANGE;
					continue;
				}
				pgt = nested->shadow_pgtable;
			}
		}
		n = 0;
		if (!p->hdr) {
			if (p->lpid > 0)
				n = scnprintf(p->buf, sizeof(p->buf),
					      "\nNested LPID %d: ", p->lpid);
			n += scnprintf(p->buf + n, sizeof(p->buf) - n,
				       "pgdir: %lx\n", (unsigned long)pgt);
			p->hdr = 1;
			goto copy;
		}

		pgdp = pgt + pgd_index(gpa);
		p4dp = p4d_offset(pgdp, gpa);
		p4d = READ_ONCE(*p4dp);
		if (!(p4d_val(p4d) & _PAGE_PRESENT)) {
			gpa = (gpa & P4D_MASK) + P4D_SIZE;
			continue;
		}

		pudp = pud_offset(&p4d, gpa);
		pud = READ_ONCE(*pudp);
		if (!(pud_val(pud) & _PAGE_PRESENT)) {
			gpa = (gpa & PUD_MASK) + PUD_SIZE;
			continue;
		}
		if (pud_val(pud) & _PAGE_PTE) {
			pte = pud_val(pud);
			shift = PUD_SHIFT;
			goto leaf;
		}

		pmdp = pmd_offset(&pud, gpa);
		pmd = READ_ONCE(*pmdp);
		if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
			gpa = (gpa & PMD_MASK) + PMD_SIZE;
			continue;
		}
		if (pmd_val(pmd) & _PAGE_PTE) {
			pte = pmd_val(pmd);
			shift = PMD_SHIFT;
			goto leaf;
		}

		ptep = pte_offset_kernel(&pmd, gpa);
		pte = pte_val(READ_ONCE(*ptep));
		if (!(pte & _PAGE_PRESENT)) {
			gpa += PAGE_SIZE;
			continue;
		}
		shift = PAGE_SHIFT;
	leaf:
		n = scnprintf(p->buf, sizeof(p->buf),
			      " %lx: %lx %d\n", gpa, pte, shift);
		gpa += 1ul << shift;
	copy:
		p->chars_left = n;
		if (n > len)
			n = len;
		r = copy_to_user(buf, p->buf, n);
		n -= r;
		p->chars_left -= n;
		p->buf_index = n;
		buf += n;
		len -= n;
		ret += n;
		if (r) {
			if (!ret)
				ret = -EFAULT;
			break;
		}
	}
	p->gpa = gpa;
	if (nested)
		kvmhv_put_nested(nested);

 out:
	mutex_unlock(&p->mutex);
	return ret;
}
static ssize_t debugfs_radix_write(struct file *file, const char __user *buf,
				   size_t len, loff_t *ppos)
{
	return -EACCES;
}
static const struct file_operations debugfs_radix_fops = {
	.owner	 = THIS_MODULE,
	.open	 = debugfs_radix_open,
	.release = debugfs_radix_release,
	.read	 = debugfs_radix_read,
	.write	 = debugfs_radix_write,
	.llseek	 = generic_file_llseek,
};
void kvmhv_radix_debugfs_init(struct kvm *kvm)
{
	debugfs_create_file("radix", 0400, kvm->debugfs_dentry, kvm,
			    &debugfs_radix_fops);
}
int kvmppc_radix_init(void)
{
	unsigned long size = sizeof(void *) << RADIX_PTE_INDEX_SIZE;

	kvm_pte_cache = kmem_cache_create("kvm-pte", size, size, 0, pte_ctor);
	if (!kvm_pte_cache)
		return -ENOMEM;

	size = sizeof(void *) << RADIX_PMD_INDEX_SIZE;

	kvm_pmd_cache = kmem_cache_create("kvm-pmd", size, size, 0, pmd_ctor);
	if (!kvm_pmd_cache) {
		kmem_cache_destroy(kvm_pte_cache);
		return -ENOMEM;
	}

	return 0;
}

void kvmppc_radix_exit(void)
{
	kmem_cache_destroy(kvm_pte_cache);
	kmem_cache_destroy(kvm_pmd_cache);
}