// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2016 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/debugfs.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>

/*
 * Supported radix tree geometry.
 * Like p9, we support either 5 or 9 bits at the first (lowest) level,
 * for a page size of 64k or 4k.
 */
static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 };

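/*
 * POWER9 radix quadrants: the top two bits of an effective address
 * select the (lpid, pid) pair used for translation.  In HV mode,
 * quadrant 1 translates with (LPIDR, PIDR) and quadrant 2 with
 * (LPIDR, pid 0), which is what lets the helpers below access
 * nested-guest memory directly.
 */
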
unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
					      gva_t eaddr, void *to, void *from,
					      unsigned long n)
{
	int old_pid, old_lpid;
	unsigned long quadrant, ret = n;
	bool is_load = !!to;

	/* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */
	if (kvmhv_on_pseries())
		return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr,
					  (to != NULL) ? __pa(to) : 0,
					  (from != NULL) ? __pa(from) : 0, n);

	quadrant = 1;
	if (!pid)
		quadrant = 2;
	if (is_load)
		from = (void *) (eaddr | (quadrant << 62));
	else
		to = (void *) (eaddr | (quadrant << 62));

	preempt_disable();

	/* switch the lpid first to avoid running host with unallocated pid */
	old_lpid = mfspr(SPRN_LPID);
	if (old_lpid != lpid)
		mtspr(SPRN_LPID, lpid);
	if (quadrant == 1) {
		old_pid = mfspr(SPRN_PID);
		if (old_pid != pid)
			mtspr(SPRN_PID, pid);
	}
	isync();

	pagefault_disable();
	if (is_load)
		ret = raw_copy_from_user(to, from, n);
	else
		ret = raw_copy_to_user(to, from, n);
	pagefault_enable();

	/* switch the pid first to avoid running host with unallocated pid */
	if (quadrant == 1 && pid != old_pid)
		mtspr(SPRN_PID, old_pid);
	if (lpid != old_lpid)
		mtspr(SPRN_LPID, old_lpid);
	isync();

	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(__kvmhv_copy_tofrom_guest_radix);

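/*
 * vcpu-level wrapper: works out the (shadow) lpid and pid, rejects
 * effective addresses that would raise a data segment interrupt, and
 * masks off the quadrant bits before doing the copy.
 */
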
static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
					  void *to, void *from, unsigned long n)
{
	int lpid = vcpu->kvm->arch.lpid;
	int pid = vcpu->arch.pid;

	/* This would cause a data segment interrupt so don't allow the access */
	if (eaddr & (0x3FFUL << 52))
		return -EINVAL;

	/* Should we be using the nested lpid */
	if (vcpu->arch.nested)
		lpid = vcpu->arch.nested->shadow_lpid;

	/* If accessing quadrant 3 then pid is expected to be 0 */
	if (((eaddr >> 62) & 0x3) == 0x3)
		pid = 0;

	eaddr &= ~(0xFFFUL << 52);

	return __kvmhv_copy_tofrom_guest_radix(lpid, pid, eaddr, to, from, n);
}

long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *to,
				 unsigned long n)
{
	long ret;

	ret = kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, to, NULL, n);
	if (ret > 0)
		memset(to + (n - ret), 0, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmhv_copy_from_guest_radix);

long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *from,
			       unsigned long n)
{
	return kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, NULL, from, n);
}
EXPORT_SYMBOL_GPL(kvmhv_copy_to_guest_radix);

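/*
 * Software walk of a guest radix page table.  @root encodes the tree
 * geometry (RTS size field plus RPDB/RPDS base and size); on success
 * the translated real address, page size and permission bits are
 * returned in @gpte, and the raw leaf PTE in *@pte_ret_p if non-NULL.
 */
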
int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
			       struct kvmppc_pte *gpte, u64 root,
			       u64 *pte_ret_p)
{
	struct kvm *kvm = vcpu->kvm;
	int ret, level, ps;
	unsigned long rts, bits, offset, index;
	u64 pte, base, gpa, addr;
	__be64 rpte;

	rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
		((root & RTS2_MASK) >> RTS2_SHIFT);
	bits = root & RPDS_MASK;
	base = root & RPDB_MASK;

	offset = rts + 31;

	/* Current implementations only support 52-bit space */
	if (offset != 52)
		return -EINVAL;

	/* Walk each level of the radix tree */
	for (level = 3; level >= 0; --level) {
		/* Check a valid size */
		if (level && bits != p9_supported_radix_bits[level])
			return -EINVAL;
		if (level == 0 && !(bits == 5 || bits == 9))
			return -EINVAL;
		offset -= bits;
		index = (eaddr >> offset) & ((1UL << bits) - 1);
		/* Check that low bits of page table base are zero */
		if (base & ((1UL << (bits + 3)) - 1))
			return -EINVAL;
		/* Read the entry from guest memory */
		addr = base + (index * sizeof(rpte));
		ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte));
		if (ret) {
			if (pte_ret_p)
				*pte_ret_p = addr;
			return ret;
		}
		pte = __be64_to_cpu(rpte);
		if (!(pte & _PAGE_PRESENT))
			return -ENOENT;
		/* Check if a leaf entry */
		if (pte & _PAGE_PTE)
			break;
		/* Get ready to walk the next level */
		base = pte & RPDB_MASK;
		bits = pte & RPDS_MASK;
	}

	/* Need a leaf at lowest level; 512GB pages not supported */
	if (level < 0 || level == 3)
		return -EINVAL;

	/* We found a valid leaf PTE */
	/* Offset is now log base 2 of the page size */
	gpa = pte & 0x01fffffffffff000ul;
	if (gpa & ((1ul << offset) - 1))
		return -EINVAL;
	gpa |= eaddr & ((1ul << offset) - 1);
	for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps)
		if (offset == mmu_psize_defs[ps].shift)
			break;
	gpte->page_size = ps;
	gpte->page_shift = offset;

	gpte->eaddr = eaddr;
	gpte->raddr = gpa;

	/* Work out permissions */
	gpte->may_read = !!(pte & _PAGE_READ);
	gpte->may_write = !!(pte & _PAGE_WRITE);
	gpte->may_execute = !!(pte & _PAGE_EXEC);

	gpte->rc = pte & (_PAGE_ACCESSED | _PAGE_DIRTY);

	if (pte_ret_p)
		*pte_ret_p = pte;

	return 0;
}

/*
 * Used to walk a partition or process table radix tree in guest memory
 * Note: We exploit the fact that a partition table and a process
 * table have the same layout, a partition-scoped page table and a
 * process-scoped page table have the same layout, and the 2nd
 * doubleword of a partition table entry has the same layout as
 * a PTE.
 */
int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
				     struct kvmppc_pte *gpte, u64 table,
				     int table_index, u64 *pte_ret_p)
{
	struct kvm *kvm = vcpu->kvm;
	int ret;
	unsigned long size, ptbl, root;
	struct prtb_entry entry;

	if ((table & PRTS_MASK) > 24)
		return -EINVAL;
	size = 1ul << ((table & PRTS_MASK) + 12);

	/* Is the table big enough to contain this entry? */
	if ((table_index * sizeof(entry)) >= size)
		return -EINVAL;

	/* Read the table to find the root of the radix tree */
	ptbl = (table & PRTB_MASK) + (table_index * sizeof(entry));
	ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry));
	if (ret)
		return ret;

	/* Root is stored in the first double word */
	root = be64_to_cpu(entry.prtb0);

	return kvmppc_mmu_walk_radix_tree(vcpu, eaddr, gpte, root, pte_ret_p);
}

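/*
 * Full guest translation: look up the effective address in the guest's
 * process table, then apply the privilege and AMR/IAMR key checks that
 * a hardware table walk would perform.
 */
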
int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			   struct kvmppc_pte *gpte, bool data, bool iswrite)
{
	u32 pid;
	u64 pte;
	int ret;

	/* Work out effective PID */
	switch (eaddr >> 62) {
	case 0:
		pid = vcpu->arch.pid;
		break;
	case 3:
		pid = 0;
		break;
	default:
		return -EINVAL;
	}

	ret = kvmppc_mmu_radix_translate_table(vcpu, eaddr, gpte,
				vcpu->kvm->arch.process_table, pid, &pte);
	if (ret)
		return ret;

	/* Check privilege (applies only to process scoped translations) */
	if (kvmppc_get_msr(vcpu) & MSR_PR) {
		if (pte & _PAGE_PRIVILEGED) {
			gpte->may_read = 0;
			gpte->may_write = 0;
			gpte->may_execute = 0;
		}
	} else {
		if (!(pte & _PAGE_PRIVILEGED)) {
			/* Check AMR/IAMR to see if strict mode is in force */
			if (vcpu->arch.amr & (1ul << 62))
				gpte->may_read = 0;
			if (vcpu->arch.amr & (1ul << 63))
				gpte->may_write = 0;
			if (vcpu->arch.iamr & (1ul << 62))
				gpte->may_execute = 0;
		}
	}

	return 0;
}

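/*
 * TLB invalidation helpers: on bare metal we can flush the relevant
 * entries ourselves; when running as a nested hypervisor under
 * pseries, the H_TLB_INVALIDATE hcall asks the level-0 hypervisor
 * to do it on our behalf.
 */
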
void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
			     unsigned int pshift, unsigned int lpid)
{
	unsigned long psize = PAGE_SIZE;
	int psi;
	long rc;
	unsigned long rb;

	if (pshift)
		psize = 1UL << pshift;
	else
		pshift = PAGE_SHIFT;

	addr &= ~(psize - 1);

	if (!kvmhv_on_pseries()) {
		radix__flush_tlb_lpid_page(lpid, addr, psize);
		return;
	}

	psi = shift_to_mmu_psize(pshift);
	rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
	rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
				lpid, rb);
	if (rc)
		pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
}

static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
{
	long rc;

	if (!kvmhv_on_pseries()) {
		radix__flush_pwc_lpid(lpid);
		return;
	}

	rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
				lpid, TLBIEL_INVAL_SET_LPID);
	if (rc)
		pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc);
}

static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
					     unsigned long clr, unsigned long set,
					     unsigned long addr, unsigned int shift)
{
	return __radix_pte_update(ptep, clr, set);
}

void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
			     pte_t *ptep, pte_t pte)
{
	radix__set_pte_at(kvm->mm, addr, ptep, pte, 0);
}

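/*
 * Radix PTE and PMD tables are smaller than a full page on typical
 * 64k-page kernels, so they are carved out of dedicated kmem caches
 * (created in kvmppc_radix_init()) instead of whole pages.
 */
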
static struct kmem_cache *kvm_pte_cache;
static struct kmem_cache *kvm_pmd_cache;

static pte_t *kvmppc_pte_alloc(void)
{
	pte_t *pte;

	pte = kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
	/* pmd_populate() will only reference _pa(pte). */
	kmemleak_ignore(pte);

	return pte;
}

static void kvmppc_pte_free(pte_t *ptep)
{
	kmem_cache_free(kvm_pte_cache, ptep);
}

static pmd_t *kvmppc_pmd_alloc(void)
{
	pmd_t *pmd;

	pmd = kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
	/* pud_populate() will only reference _pa(pmd). */
	kmemleak_ignore(pmd);

	return pmd;
}

static void kvmppc_pmd_free(pmd_t *pmdp)
{
	kmem_cache_free(kvm_pmd_cache, pmdp);
}

/* Called with kvm->mmu_lock held */
void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
		      unsigned int shift,
		      const struct kvm_memory_slot *memslot,
		      unsigned int lpid)
{
	unsigned long old;
	unsigned long gfn = gpa >> PAGE_SHIFT;
	unsigned long page_size = PAGE_SIZE;
	unsigned long hpa;

	old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift);
	kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);

	/* The following only applies to L1 entries */
	if (lpid != kvm->arch.lpid)
		return;

	if (!memslot) {
		memslot = gfn_to_memslot(kvm, gfn);
		if (!memslot)
			return;
	}
	if (shift) { /* 1GB or 2MB page */
		page_size = 1ul << shift;
		if (shift == PMD_SHIFT)
			kvm->stat.num_2M_pages--;
		else if (shift == PUD_SHIFT)
			kvm->stat.num_1G_pages--;
	}

	gpa &= ~(page_size - 1);
	hpa = old & PTE_RPN_MASK;
	kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size);

	if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap)
		kvmppc_update_dirty_map(memslot, gfn, page_size);
}

/*
 * kvmppc_free_p?d are used to free existing page tables, and recursively
 * descend and clear and free children.
 * Callers are responsible for flushing the PWC.
 *
 * When page tables are being unmapped/freed as part of the page fault path
 * (full == false), valid ptes are not expected. There is code to unmap them
 * and emit a warning if encountered, but there may already be data
 * corruption due to the unexpected mappings.
 */
static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
				  unsigned int lpid)
{
	if (full) {
		memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE);
	} else {
		pte_t *p = pte;
		unsigned long it;

		for (it = 0; it < PTRS_PER_PTE; ++it, ++p) {
			if (pte_val(*p) == 0)
				continue;
			WARN_ON_ONCE(1);
			kvmppc_unmap_pte(kvm, p,
					 pte_pfn(*p) << PAGE_SHIFT,
					 PAGE_SHIFT, NULL, lpid);
		}
	}

	kvmppc_pte_free(pte);
}

static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
				  unsigned int lpid)
{
	pmd_t *p = pmd;
	unsigned long im;

	for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
		if (!pmd_present(*p))
			continue;
		if (pmd_is_leaf(*p)) {
			if (full) {
				pmd_clear(p);
			} else {
				WARN_ON_ONCE(1);
				kvmppc_unmap_pte(kvm, (pte_t *)p,
					 pte_pfn(*(pte_t *)p) << PAGE_SHIFT,
					 PMD_SHIFT, NULL, lpid);
			}
		} else {
			pte_t *pte;

			pte = pte_offset_map(p, 0);
			kvmppc_unmap_free_pte(kvm, pte, full, lpid);
			pmd_clear(p);
		}
	}
	kvmppc_pmd_free(pmd);
}

static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
				  unsigned int lpid)
{
	pud_t *p = pud;
	unsigned long iu;

	for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
		if (!pud_present(*p))
			continue;
		if (pud_is_leaf(*p)) {
			pud_clear(p);
		} else {
			pmd_t *pmd;

			pmd = pmd_offset(p, 0);
			kvmppc_unmap_free_pmd(kvm, pmd, true, lpid);
			pud_clear(p);
		}
	}
	pud_free(kvm->mm, pud);
}

void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid)
{
	unsigned long ig;

	for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
		pud_t *pud;

		if (!pgd_present(*pgd))
			continue;
		pud = pud_offset(pgd, 0);
		kvmppc_unmap_free_pud(kvm, pud, lpid);
		pgd_clear(pgd);
	}
}

void kvmppc_free_radix(struct kvm *kvm)
{
	if (kvm->arch.pgtable) {
		kvmppc_free_pgtable_radix(kvm, kvm->arch.pgtable,
					  kvm->arch.lpid);
		pgd_free(kvm->mm, kvm->arch.pgtable);
		kvm->arch.pgtable = NULL;
	}
}

static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
					unsigned long gpa, unsigned int lpid)
{
	pte_t *pte = pte_offset_kernel(pmd, 0);

	/*
	 * Clearing the pmd entry then flushing the PWC ensures that the pte
	 * page can no longer be cached by the MMU, so it can be freed without
	 * flushing the PWC again.
	 */
	pmd_clear(pmd);
	kvmppc_radix_flush_pwc(kvm, lpid);

	kvmppc_unmap_free_pte(kvm, pte, false, lpid);
}

static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
					unsigned long gpa, unsigned int lpid)
{
	pmd_t *pmd = pmd_offset(pud, 0);

	/*
	 * Clearing the pud entry then flushing the PWC ensures that the pmd
	 * page and any child pte pages can no longer be cached by the MMU,
	 * so they can be freed without flushing the PWC again.
	 */
	pud_clear(pud);
	kvmppc_radix_flush_pwc(kvm, lpid);

	kvmppc_unmap_free_pmd(kvm, pmd, false, lpid);
}

/*
 * There are a number of bits which may differ between different faults to
 * the same partition scope entry.  The RC bits change in the course of
 * cleaning and aging, and the write bit can change too: either the access
 * could have been upgraded, or a read fault could happen concurrently with
 * a write fault that sets those bits first.
 */
#define PTE_BITS_MUST_MATCH (~(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED))

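/*
 * Insert @pte into the partition-scoped tree at @level (0 = base page,
 * 1 = 2MB, 2 = 1GB), allocating intermediate table pages as needed.
 * Allocations happen before taking the mmu_lock; under the lock the
 * tree is re-walked, races with concurrent large-page installs are
 * resolved, and -EAGAIN is returned if an MMU notifier invalidation
 * raced with us so the caller can retry.
 */
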
int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
		      unsigned long gpa, unsigned int level,
		      unsigned long mmu_seq, unsigned int lpid,
		      unsigned long *rmapp, struct rmap_nested **n_rmap)
{
	pgd_t *pgd;
	pud_t *pud, *new_pud = NULL;
	pmd_t *pmd, *new_pmd = NULL;
	pte_t *ptep, *new_ptep = NULL;
	int ret;

	/* Traverse the guest's 2nd-level tree, allocate new levels needed */
	pgd = pgtable + pgd_index(gpa);
	pud = NULL;
	if (pgd_present(*pgd))
		pud = pud_offset(pgd, gpa);
	else
		new_pud = pud_alloc_one(kvm->mm, gpa);

	pmd = NULL;
	if (pud && pud_present(*pud) && !pud_is_leaf(*pud))
		pmd = pmd_offset(pud, gpa);
	else if (level <= 1)
		new_pmd = kvmppc_pmd_alloc();

	if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
		new_ptep = kvmppc_pte_alloc();

	/* Check if we might have been invalidated; let the guest retry if so */
	spin_lock(&kvm->mmu_lock);
	ret = -EAGAIN;
	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;

	/* Now traverse again under the lock and change the tree */
	ret = -ENOMEM;
	if (pgd_none(*pgd)) {
		if (!new_pud)
			goto out_unlock;
		pgd_populate(kvm->mm, pgd, new_pud);
		new_pud = NULL;
	}
	pud = pud_offset(pgd, gpa);
	if (pud_is_leaf(*pud)) {
		unsigned long hgpa = gpa & PUD_MASK;

		/* Check if we raced and someone else has set the same thing */
		if (level == 2) {
			if (pud_raw(*pud) == pte_raw(pte)) {
				ret = 0;
				goto out_unlock;
			}
			/* Valid 1GB page here already, add our extra bits */
			WARN_ON_ONCE((pud_val(*pud) ^ pte_val(pte)) &
							PTE_BITS_MUST_MATCH);
			kvmppc_radix_update_pte(kvm, (pte_t *)pud,
					      0, pte_val(pte), hgpa, PUD_SHIFT);
			ret = 0;
			goto out_unlock;
		}
		/*
		 * If we raced with another CPU which has just put
		 * a 1GB pte in after we saw a pmd page, try again.
		 */
		if (!new_pmd) {
			ret = -EAGAIN;
			goto out_unlock;
		}
		/* Valid 1GB page here already, remove it */
		kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT, NULL,
				 lpid);
	}
	if (level == 2) {
		if (!pud_none(*pud)) {
			/*
			 * There's a page table page here, but we wanted to
			 * install a large page, so remove and free the page
			 * table page.
			 */
			kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa, lpid);
		}
		kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte);
		if (rmapp && n_rmap)
			kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
		ret = 0;
		goto out_unlock;
	}
	if (pud_none(*pud)) {
		if (!new_pmd)
			goto out_unlock;
		pud_populate(kvm->mm, pud, new_pmd);
		new_pmd = NULL;
	}
	pmd = pmd_offset(pud, gpa);
	if (pmd_is_leaf(*pmd)) {
		unsigned long lgpa = gpa & PMD_MASK;

		/* Check if we raced and someone else has set the same thing */
		if (level == 1) {
			if (pmd_raw(*pmd) == pte_raw(pte)) {
				ret = 0;
				goto out_unlock;
			}
			/* Valid 2MB page here already, add our extra bits */
			WARN_ON_ONCE((pmd_val(*pmd) ^ pte_val(pte)) &
							PTE_BITS_MUST_MATCH);
			kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
					0, pte_val(pte), lgpa, PMD_SHIFT);
			ret = 0;
			goto out_unlock;
		}
		/*
		 * If we raced with another CPU which has just put
		 * a 2MB pte in after we saw a pte page, try again.
		 */
		if (!new_ptep) {
			ret = -EAGAIN;
			goto out_unlock;
		}
		/* Valid 2MB page here already, remove it */
		kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT, NULL,
				 lpid);
	}
	if (level == 1) {
		if (!pmd_none(*pmd)) {
			/*
			 * There's a page table page here, but we wanted to
			 * install a large page, so remove and free the page
			 * table page.
			 */
			kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa, lpid);
		}
		kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
		if (rmapp && n_rmap)
			kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
		ret = 0;
		goto out_unlock;
	}
	if (pmd_none(*pmd)) {
		if (!new_ptep)
			goto out_unlock;
		pmd_populate(kvm->mm, pmd, new_ptep);
		new_ptep = NULL;
	}
	ptep = pte_offset_kernel(pmd, gpa);
	if (pte_present(*ptep)) {
		/* Check if someone else set the same thing */
		if (pte_raw(*ptep) == pte_raw(pte)) {
			ret = 0;
			goto out_unlock;
		}
		/* Valid page here already, add our extra bits */
		WARN_ON_ONCE((pte_val(*ptep) ^ pte_val(pte)) &
							PTE_BITS_MUST_MATCH);
		kvmppc_radix_update_pte(kvm, ptep, 0, pte_val(pte), gpa, 0);
		ret = 0;
		goto out_unlock;
	}
	kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
	if (rmapp && n_rmap)
		kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
	ret = 0;

 out_unlock:
	spin_unlock(&kvm->mmu_lock);
	if (new_pud)
		pud_free(kvm->mm, new_pud);
	if (new_pmd)
		kvmppc_pmd_free(new_pmd);
	if (new_ptep)
		kvmppc_pte_free(new_ptep);
	return ret;
}

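/*
 * Set the referenced/changed bits in a partition-scoped PTE on behalf
 * of the hardware, mirroring what the MMU would do on an R/C update.
 * Returns true if a valid PTE was found and updated.
 */
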
bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable, bool writing,
			     unsigned long gpa, unsigned int lpid)
{
	unsigned long pgflags;
	unsigned int shift;
	pte_t *ptep;

	/*
	 * Need to set an R or C bit in the 2nd-level tables;
	 * since we are just helping out the hardware here,
	 * it is sufficient to do what the hardware does.
	 */
	pgflags = _PAGE_ACCESSED;
	if (writing)
		pgflags |= _PAGE_DIRTY;
	/*
	 * We are walking the secondary (partition-scoped) page table here.
	 * We can do this without disabling irq because the Linux MM
	 * subsystem doesn't do THP splits and collapses on this tree.
	 */
	ptep = __find_linux_pte(pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep) && (!writing || pte_write(*ptep))) {
		kvmppc_radix_update_pte(kvm, ptep, 0, pgflags, gpa, shift);
		return true;
	}
	return false;
}

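/*
 * Fault in a guest page: find (or fault in) the backing host page,
 * read the Linux PTE for the host mapping to pick up the page size
 * and attribute bits, and install a matching PTE in the
 * partition-scoped tree via kvmppc_create_pte().
 */
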
int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
				   unsigned long gpa,
				   struct kvm_memory_slot *memslot,
				   bool writing, bool kvm_ro,
				   pte_t *inserted_pte, unsigned int *levelp)
{
	struct kvm *kvm = vcpu->kvm;
	struct page *page = NULL;
	unsigned long mmu_seq;
	unsigned long hva, gfn = gpa >> PAGE_SHIFT;
	bool upgrade_write = false;
	bool *upgrade_p = &upgrade_write;
	pte_t pte, *ptep;
	unsigned int shift, level;
	int ret;
	bool large_enable;

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/*
	 * Do a fast check first, since __gfn_to_pfn_memslot doesn't
	 * do it with !atomic && !async, which is how we call it.
	 * We always ask for write permission since the common case
	 * is that the page is writable.
	 */
	hva = gfn_to_hva_memslot(memslot, gfn);
	if (!kvm_ro && __get_user_pages_fast(hva, 1, 1, &page) == 1) {
		upgrade_write = true;
	} else {
		unsigned long pfn;

		/* Call KVM generic code to do the slow-path check */
		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
					   writing, upgrade_p);
		if (is_error_noslot_pfn(pfn))
			return -EFAULT;
		page = NULL;
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageReserved(page))
				page = NULL;
		}
	}

	/*
	 * Read the PTE from the process' radix tree and use that
	 * so we get the shift and attribute bits.
	 */
	local_irq_disable();
	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
	/*
	 * If the PTE disappeared temporarily due to a THP
	 * collapse, just return and let the guest try again.
	 */
	if (!ptep) {
		local_irq_enable();
		if (page)
			put_page(page);
		return RESUME_GUEST;
	}
	pte = *ptep;
	local_irq_enable();

	/* If we're logging dirty pages, always map single pages */
	large_enable = !(memslot->flags & KVM_MEM_LOG_DIRTY_PAGES);

	/* Get pte level from shift/size */
	if (large_enable && shift == PUD_SHIFT &&
	    (gpa & (PUD_SIZE - PAGE_SIZE)) ==
	    (hva & (PUD_SIZE - PAGE_SIZE))) {
		level = 2;
	} else if (large_enable && shift == PMD_SHIFT &&
		   (gpa & (PMD_SIZE - PAGE_SIZE)) ==
		   (hva & (PMD_SIZE - PAGE_SIZE))) {
		level = 1;
	} else {
		level = 0;
		if (shift > PAGE_SHIFT) {
			/*
			 * If the pte maps more than one page, bring over
			 * bits from the virtual address to get the real
			 * address of the specific single page we want.
			 */
			unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
			pte = __pte(pte_val(pte) | (hva & rpnmask));
		}
	}

	pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
	if (writing || upgrade_write) {
		if (pte_val(pte) & _PAGE_WRITE)
			pte = __pte(pte_val(pte) | _PAGE_DIRTY);
	} else {
		pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
	}

	/* Allocate space in the tree and write the PTE */
	ret = kvmppc_create_pte(kvm, kvm->arch.pgtable, pte, gpa, level,
				mmu_seq, kvm->arch.lpid, NULL, NULL);
	if (inserted_pte)
		*inserted_pte = pte;
	if (levelp)
		*levelp = level;

	if (page) {
		if (!ret && (pte_val(pte) & _PAGE_WRITE))
			set_page_dirty_lock(page);
		put_page(page);
	}

	/* Increment number of large pages if we (successfully) inserted one */
	if (!ret) {
		if (level == 1)
			kvm->stat.num_2M_pages++;
		else if (level == 2)
			kvm->stat.num_1G_pages++;
	}

	return ret;
}

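/*
 * Top-level HV radix page fault handler: classifies the DSISR bits,
 * reflects bad accesses to the guest as DSIs, emulates MMIO for
 * unbacked addresses, fixes up R/C bits, and otherwise instantiates
 * a mapping for the faulting address.
 */
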
int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
				   unsigned long ea, unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long gpa, gfn;
	struct kvm_memory_slot *memslot;
	long ret;
	bool writing = !!(dsisr & DSISR_ISSTORE);
	bool kvm_ro = false;

	/* Check for unusual errors */
	if (dsisr & DSISR_UNSUPP_MMU) {
		pr_err("KVM: Got unsupported MMU fault\n");
		return -EFAULT;
	}
	if (dsisr & DSISR_BADACCESS) {
		/* Reflect to the guest as DSI */
		pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
		kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
		return RESUME_GUEST;
	}

	/* Translate the logical address */
	gpa = vcpu->arch.fault_gpa & ~0xfffUL;
	gpa &= ~0xF000000000000000ul;
	gfn = gpa >> PAGE_SHIFT;
	if (!(dsisr & DSISR_PRTABLE_FAULT))
		gpa |= ea & 0xfff;

	/* Get the corresponding memslot */
	memslot = gfn_to_memslot(kvm, gfn);

	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS |
			     DSISR_SET_RC)) {
			/*
			 * Bad address in guest page table tree, or other
			 * unusual error - reflect it to the guest as DSI.
			 */
			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
			return RESUME_GUEST;
		}
		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing);
	}

	if (memslot->flags & KVM_MEM_READONLY) {
		if (writing) {
			/* give the guest a DSI */
			kvmppc_core_queue_data_storage(vcpu, ea, DSISR_ISSTORE |
						       DSISR_PROTFAULT);
			return RESUME_GUEST;
		}
		kvm_ro = true;
	}

	/* Failed to set the reference/change bits */
	if (dsisr & DSISR_SET_RC) {
		spin_lock(&kvm->mmu_lock);
		if (kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable,
					    writing, gpa, kvm->arch.lpid))
			dsisr &= ~DSISR_SET_RC;
		spin_unlock(&kvm->mmu_lock);

		if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
			       DSISR_PROTFAULT | DSISR_SET_RC)))
			return RESUME_GUEST;
	}

	/* Try to insert a pte */
	ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, writing,
					     kvm_ro, NULL, NULL);

	if (ret == 0 || ret == -EAGAIN)
		ret = RESUME_GUEST;
	return ret;
}

/* Called with kvm->mmu_lock held */
int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		    unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;

	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep))
		kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
				 kvm->arch.lpid);
	return 0;
}

/* Called with kvm->mmu_lock held */
int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		  unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;
	int ref = 0;
	unsigned long old, *rmapp;

	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
					      gpa, shift);
		/* XXX need to flush tlb here? */
		/* Also clear bit in ptes in shadow pgtable for nested guests */
		rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
		kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_ACCESSED, 0,
					       old & PTE_RPN_MASK,
					       1UL << shift);
		ref = 1;
	}
	return ref;
}

/* Called with kvm->mmu_lock held */
int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		       unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;
	int ref = 0;

	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep) && pte_young(*ptep))
		ref = 1;
	return ref;
}

/* Returns the number of PAGE_SIZE pages that are dirty */
static int kvm_radix_test_clear_dirty(struct kvm *kvm,
				struct kvm_memory_slot *memslot, int pagenum)
{
	unsigned long gfn = memslot->base_gfn + pagenum;
	unsigned long gpa = gfn << PAGE_SHIFT;
	pte_t *ptep;
	unsigned int shift;
	int ret = 0;
	unsigned long old, *rmapp;

	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
		ret = 1;
		if (shift)
			ret = 1 << (shift - PAGE_SHIFT);
		spin_lock(&kvm->mmu_lock);
		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
					      gpa, shift);
		kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid);
		/* Also clear bit in ptes in shadow pgtable for nested guests */
		rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
		kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_DIRTY, 0,
					       old & PTE_RPN_MASK,
					       1UL << shift);
		spin_unlock(&kvm->mmu_lock);
	}
	return ret;
}

long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map)
{
	unsigned long i, j;
	int npages;

	for (i = 0; i < memslot->npages; i = j) {
		npages = kvm_radix_test_clear_dirty(kvm, memslot, i);

		/*
		 * Note that if npages > 0 then i must be a multiple of npages,
		 * since huge pages are only used to back the guest at guest
		 * real addresses that are a multiple of their size.
		 * Since we have at most one PTE covering any given guest
		 * real address, if npages > 1 we can skip to i + npages.
		 */
		j = i + 1;
		if (npages) {
			set_dirty_bits(map, i, npages);
			j = i + npages;
		}
	}
	return 0;
}

void kvmppc_radix_flush_memslot(struct kvm *kvm,
				const struct kvm_memory_slot *memslot)
{
	unsigned long n;
	pte_t *ptep;
	unsigned long gpa;
	unsigned int shift;

	gpa = memslot->base_gfn << PAGE_SHIFT;
	spin_lock(&kvm->mmu_lock);
	for (n = memslot->npages; n; --n) {
		ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
		if (ptep && pte_present(*ptep))
			kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
					 kvm->arch.lpid);
		gpa += PAGE_SIZE;
	}
	/*
	 * Increase the mmu notifier sequence number to prevent any page
	 * fault that read the memslot earlier from writing a PTE.
	 */
	kvm->mmu_notifier_seq++;
	spin_unlock(&kvm->mmu_lock);
}

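/*
 * Each ap_encodings entry packs the page shift into the low bits and
 * the "actual page size" (AP) field at bit position 29, the layout
 * userspace expects from the KVM_PPC_GET_RMMU_INFO ioctl.
 */
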
static void add_rmmu_ap_encoding(struct kvm_ppc_rmmu_info *info,
				 int psize, int *indexp)
{
	if (!mmu_psize_defs[psize].shift)
		return;
	info->ap_encodings[*indexp] = mmu_psize_defs[psize].shift |
		(mmu_psize_defs[psize].ap << 29);
	++(*indexp);
}

int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info)
{
	int i;

	if (!radix_enabled())
		return -EINVAL;
	memset(info, 0, sizeof(*info));

	/* 4k page size */
	info->geometries[0].page_shift = 12;
	info->geometries[0].level_bits[0] = 9;
	for (i = 1; i < 4; ++i)
		info->geometries[0].level_bits[i] = p9_supported_radix_bits[i];
	/* 64k page size */
	info->geometries[1].page_shift = 16;
	for (i = 0; i < 4; ++i)
		info->geometries[1].level_bits[i] = p9_supported_radix_bits[i];

	i = 0;
	add_rmmu_ap_encoding(info, MMU_PAGE_4K, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_64K, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_2M, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_1G, &i);

	return 0;
}

int kvmppc_init_vm_radix(struct kvm *kvm)
{
	kvm->arch.pgtable = pgd_alloc(kvm->mm);
	if (!kvm->arch.pgtable)
		return -ENOMEM;
	return 0;
}

static void pte_ctor(void *addr)
{
	memset(addr, 0, RADIX_PTE_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
	memset(addr, 0, RADIX_PMD_TABLE_SIZE);
}

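/*
 * debugfs interface: the per-VM "radix" file dumps the partition-scoped
 * tree of this guest and, for a nested hypervisor, the shadow trees of
 * its nested guests.
 */
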
struct debugfs_radix_state {
	struct kvm	*kvm;
	struct mutex	mutex;
	unsigned long	gpa;
	int		lpid;
	int		chars_left;
	int		buf_index;
	char		buf[128];
	u8		hdr;
};

static int debugfs_radix_open(struct inode *inode, struct file *file)
{
	struct kvm *kvm = inode->i_private;
	struct debugfs_radix_state *p;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	kvm_get_kvm(kvm);
	p->kvm = kvm;
	mutex_init(&p->mutex);
	file->private_data = p;

	return nonseekable_open(inode, file);
}

static int debugfs_radix_release(struct inode *inode, struct file *file)
{
	struct debugfs_radix_state *p = file->private_data;

	kvm_put_kvm(p->kvm);
	mutex_destroy(&p->mutex);
	kfree(p);
	return 0;
}

static ssize_t debugfs_radix_read(struct file *file, char __user *buf,
				  size_t len, loff_t *ppos)
{
	struct debugfs_radix_state *p = file->private_data;
	ssize_t ret, r;
	unsigned long n;
	struct kvm *kvm;
	unsigned long gpa;
	pgd_t *pgt;
	struct kvm_nested_guest *nested;
	pgd_t pgd, *pgdp;
	pud_t pud, *pudp;
	pmd_t pmd, *pmdp;
	pte_t *ptep;
	int shift;
	unsigned long pte;

	kvm = p->kvm;
	if (!kvm_is_radix(kvm))
		return 0;

	ret = mutex_lock_interruptible(&p->mutex);
	if (ret)
		return ret;

	if (p->chars_left) {
		n = p->chars_left;
		if (n > len)
			n = len;
		r = copy_to_user(buf, p->buf + p->buf_index, n);
		n -= r;
		p->chars_left -= n;
		p->buf_index += n;
		buf += n;
		len -= n;
		ret = n;
		if (r) {
			if (!n)
				ret = -EFAULT;
			goto out;
		}
	}

	gpa = p->gpa;
	nested = NULL;
	pgt = NULL;
	while (len != 0 && p->lpid >= 0) {
		if (gpa >= RADIX_PGTABLE_RANGE) {
			gpa = 0;
			pgt = NULL;
			if (nested) {
				kvmhv_put_nested(nested);
				nested = NULL;
			}
			p->lpid = kvmhv_nested_next_lpid(kvm, p->lpid);
			p->hdr = 0;
			if (p->lpid < 0)
				break;
		}
		if (!pgt) {
			if (p->lpid == 0) {
				pgt = kvm->arch.pgtable;
			} else {
				nested = kvmhv_get_nested(kvm, p->lpid, false);
				if (!nested) {
					gpa = RADIX_PGTABLE_RANGE;
					continue;
				}
				pgt = nested->shadow_pgtable;
			}
		}
		n = 0;
		if (!p->hdr) {
			if (p->lpid > 0)
				n = scnprintf(p->buf, sizeof(p->buf),
					      "\nNested LPID %d: ", p->lpid);
			n += scnprintf(p->buf + n, sizeof(p->buf) - n,
				       "pgdir: %lx\n", (unsigned long)pgt);
			p->hdr = 1;
			goto copy;
		}

		pgdp = pgt + pgd_index(gpa);
		pgd = READ_ONCE(*pgdp);
		if (!(pgd_val(pgd) & _PAGE_PRESENT)) {
			gpa = (gpa & PGDIR_MASK) + PGDIR_SIZE;
			continue;
		}

		pudp = pud_offset(&pgd, gpa);
		pud = READ_ONCE(*pudp);
		if (!(pud_val(pud) & _PAGE_PRESENT)) {
			gpa = (gpa & PUD_MASK) + PUD_SIZE;
			continue;
		}
		if (pud_val(pud) & _PAGE_PTE) {
			pte = pud_val(pud);
			shift = PUD_SHIFT;
			goto leaf;
		}

		pmdp = pmd_offset(&pud, gpa);
		pmd = READ_ONCE(*pmdp);
		if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
			gpa = (gpa & PMD_MASK) + PMD_SIZE;
			continue;
		}
		if (pmd_val(pmd) & _PAGE_PTE) {
			pte = pmd_val(pmd);
			shift = PMD_SHIFT;
			goto leaf;
		}

		ptep = pte_offset_kernel(&pmd, gpa);
		pte = pte_val(READ_ONCE(*ptep));
		if (!(pte & _PAGE_PRESENT)) {
			gpa += PAGE_SIZE;
			continue;
		}
		shift = PAGE_SHIFT;
	leaf:
		n = scnprintf(p->buf, sizeof(p->buf),
			      " %lx: %lx %d\n", gpa, pte, shift);
		gpa += 1ul << shift;
	copy:
		p->chars_left = n;
		if (n > len)
			n = len;
		r = copy_to_user(buf, p->buf, n);
		n -= r;
		p->chars_left -= n;
		p->buf_index = n;
		buf += n;
		len -= n;
		ret += n;
		if (r) {
			if (!ret)
				ret = -EFAULT;
			break;
		}
	}
	p->gpa = gpa;
	if (nested)
		kvmhv_put_nested(nested);

 out:
	mutex_unlock(&p->mutex);
	return ret;
}

static ssize_t debugfs_radix_write(struct file *file, const char __user *buf,
				   size_t len, loff_t *ppos)
{
	return -EACCES;
}

static const struct file_operations debugfs_radix_fops = {
	.owner	 = THIS_MODULE,
	.open	 = debugfs_radix_open,
	.release = debugfs_radix_release,
	.read	 = debugfs_radix_read,
	.write	 = debugfs_radix_write,
	.llseek	 = generic_file_llseek,
};

void kvmhv_radix_debugfs_init(struct kvm *kvm)
{
	kvm->arch.radix_dentry = debugfs_create_file("radix", 0400,
						     kvm->arch.debugfs_dir, kvm,
						     &debugfs_radix_fops);
}

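/*
 * The caches below are sized and aligned to the radix table sizes so
 * that several sub-page tables can share one kernel page.
 */
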
int kvmppc_radix_init(void)
{
	unsigned long size = sizeof(void *) << RADIX_PTE_INDEX_SIZE;

	kvm_pte_cache = kmem_cache_create("kvm-pte", size, size, 0, pte_ctor);
	if (!kvm_pte_cache)
		return -ENOMEM;

	size = sizeof(void *) << RADIX_PMD_INDEX_SIZE;

	kvm_pmd_cache = kmem_cache_create("kvm-pmd", size, size, 0, pmd_ctor);
	if (!kvm_pmd_cache) {
		kmem_cache_destroy(kvm_pte_cache);
		return -ENOMEM;
	}

	return 0;
}

void kvmppc_radix_exit(void)
{
	kmem_cache_destroy(kvm_pte_cache);
	kmem_cache_destroy(kvm_pmd_cache);
}