1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
4 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
7 #include <linux/mman.h>
8 #include <linux/kvm_host.h>
10 #include <linux/hugetlb.h>
11 #include <linux/sched/signal.h>
12 #include <trace/events/kvm.h>
13 #include <asm/pgalloc.h>
14 #include <asm/cacheflush.h>
15 #include <asm/kvm_arm.h>
16 #include <asm/kvm_mmu.h>
17 #include <asm/kvm_pgtable.h>
18 #include <asm/kvm_ras.h>
19 #include <asm/kvm_asm.h>
20 #include <asm/kvm_emulate.h>
25 static struct kvm_pgtable *hyp_pgtable;
26 static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
28 static unsigned long __ro_after_init hyp_idmap_start;
29 static unsigned long __ro_after_init hyp_idmap_end;
30 static phys_addr_t __ro_after_init hyp_idmap_vector;
32 static unsigned long __ro_after_init io_map_base;
34 static phys_addr_t __stage2_range_addr_end(phys_addr_t addr, phys_addr_t end,
37 phys_addr_t boundary = ALIGN_DOWN(addr + size, size);
39 return (boundary - 1 < end - 1) ? boundary : end;
42 static phys_addr_t stage2_range_addr_end(phys_addr_t addr, phys_addr_t end)
44 phys_addr_t size = kvm_granule_size(KVM_PGTABLE_MIN_BLOCK_LEVEL);
46 return __stage2_range_addr_end(addr, end, size);
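/*
 * For illustration (assuming 4K pages, where the largest stage-2 block is
 * 1GiB): with addr = 0x80100000 and end = 0x200000000, boundary =
 * ALIGN_DOWN(0x80100000 + 0x40000000, 0x40000000) = 0xC0000000, which is
 * below end, so the walk stops at the next 1GiB boundary rather than at end.
 * Comparing "boundary - 1 < end - 1" instead of "boundary < end" presumably
 * keeps the test correct when an exclusive end of 0 describes a range that
 * runs to the very top of the address space.
 */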
50 * Release kvm_mmu_lock periodically if the memory region is large. Otherwise,
51 * we may see kernel panics with CONFIG_DETECT_HUNG_TASK,
52 * CONFIG_LOCKUP_DETECTOR, CONFIG_LOCKDEP. Additionally, holding the lock too
53 * long will also starve other vCPUs. We also have to make sure that the page
54 * tables are not freed while the lock is released.
56 static int stage2_apply_range(struct kvm_s2_mmu *mmu, phys_addr_t addr,
58 int (*fn)(struct kvm_pgtable *, u64, u64),
61 struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
66 struct kvm_pgtable *pgt = mmu->pgt;
70 next = stage2_range_addr_end(addr, end);
71 ret = fn(pgt, addr, next - addr);
75 if (resched && next != end)
76 cond_resched_rwlock_write(&kvm->mmu_lock);
77 } while (addr = next, addr != end);
82 #define stage2_apply_range_resched(mmu, addr, end, fn) \
83 stage2_apply_range(mmu, addr, end, fn, true)
86 * Get the maximum number of page-table pages needed to split a range
87 * of blocks into PAGE_SIZE PTEs. It assumes the range is already
88 * mapped at level 2, or at level 1 if allowed.
90 static int kvm_mmu_split_nr_page_tables(u64 range)
94 if (KVM_PGTABLE_MIN_BLOCK_LEVEL < 2)
95 n += DIV_ROUND_UP(range, PUD_SIZE);
96 n += DIV_ROUND_UP(range, PMD_SIZE);
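/*
 * For illustration (assuming 4K pages, so PUD_SIZE = 1GiB and PMD_SIZE =
 * 2MiB, with 1GiB blocks allowed): splitting a 1GiB chunk all the way down
 * to PTEs needs at most DIV_ROUND_UP(1GiB, 1GiB) = 1 table to replace a
 * PUD-level block plus DIV_ROUND_UP(1GiB, 2MiB) = 512 tables to replace the
 * PMD-level blocks, i.e. 513 page-table pages in total.
 */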
100 static bool need_split_memcache_topup_or_resched(struct kvm *kvm)
102 struct kvm_mmu_memory_cache *cache;
105 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock))
108 chunk_size = kvm->arch.mmu.split_page_chunk_size;
109 min = kvm_mmu_split_nr_page_tables(chunk_size);
110 cache = &kvm->arch.mmu.split_page_cache;
111 return kvm_mmu_memory_cache_nr_free_objects(cache) < min;
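/*
 * Tying this to the example above: with a 1GiB chunk size on 4K pages, the
 * split cache must hold at least 513 free pages before the next chunk is
 * attempted; otherwise the caller drops the lock and tops the cache up first.
 */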
114 static int kvm_mmu_split_huge_pages(struct kvm *kvm, phys_addr_t addr,
117 struct kvm_mmu_memory_cache *cache;
118 struct kvm_pgtable *pgt;
119 int ret, cache_capacity;
120 u64 next, chunk_size;
122 lockdep_assert_held_write(&kvm->mmu_lock);
124 chunk_size = kvm->arch.mmu.split_page_chunk_size;
125 cache_capacity = kvm_mmu_split_nr_page_tables(chunk_size);
130 cache = &kvm->arch.mmu.split_page_cache;
133 if (need_split_memcache_topup_or_resched(kvm)) {
134 write_unlock(&kvm->mmu_lock);
136 /* Eager page splitting is best-effort. */
137 ret = __kvm_mmu_topup_memory_cache(cache,
140 write_lock(&kvm->mmu_lock);
145 pgt = kvm->arch.mmu.pgt;
149 next = __stage2_range_addr_end(addr, end, chunk_size);
150 ret = kvm_pgtable_stage2_split(pgt, addr, next - addr, cache);
153 } while (addr = next, addr != end);
158 static bool memslot_is_logging(struct kvm_memory_slot *memslot)
160 return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
164 * kvm_arch_flush_remote_tlbs() - flush all VM TLB entries for v7/8
165 * @kvm: pointer to kvm structure.
167 * Interface to HYP function to flush all VM TLB entries
169 int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
171 kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
175 int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm,
176 gfn_t gfn, u64 nr_pages)
178 kvm_tlb_flush_vmid_range(&kvm->arch.mmu,
179 gfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
183 static bool kvm_is_device_pfn(unsigned long pfn)
185 return !pfn_is_map_memory(pfn);
188 static void *stage2_memcache_zalloc_page(void *arg)
190 struct kvm_mmu_memory_cache *mc = arg;
193 /* Allocated with __GFP_ZERO, so no need to zero */
194 virt = kvm_mmu_memory_cache_alloc(mc);
196 kvm_account_pgtable_pages(virt, 1);
200 static void *kvm_host_zalloc_pages_exact(size_t size)
202 return alloc_pages_exact(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
205 static void *kvm_s2_zalloc_pages_exact(size_t size)
207 void *virt = kvm_host_zalloc_pages_exact(size);
210 kvm_account_pgtable_pages(virt, (size >> PAGE_SHIFT));
214 static void kvm_s2_free_pages_exact(void *virt, size_t size)
216 kvm_account_pgtable_pages(virt, -(size >> PAGE_SHIFT));
217 free_pages_exact(virt, size);
220 static struct kvm_pgtable_mm_ops kvm_s2_mm_ops;
222 static void stage2_free_unlinked_table_rcu_cb(struct rcu_head *head)
224 struct page *page = container_of(head, struct page, rcu_head);
225 void *pgtable = page_to_virt(page);
226 u32 level = page_private(page);
228 kvm_pgtable_stage2_free_unlinked(&kvm_s2_mm_ops, pgtable, level);
231 static void stage2_free_unlinked_table(void *addr, u32 level)
233 struct page *page = virt_to_page(addr);
235 set_page_private(page, (unsigned long)level);
236 call_rcu(&page->rcu_head, stage2_free_unlinked_table_rcu_cb);
239 static void kvm_host_get_page(void *addr)
241 get_page(virt_to_page(addr));
244 static void kvm_host_put_page(void *addr)
246 put_page(virt_to_page(addr));
249 static void kvm_s2_put_page(void *addr)
251 struct page *p = virt_to_page(addr);
252 /* Dropping last refcount, the page will be freed */
253 if (page_count(p) == 1)
254 kvm_account_pgtable_pages(addr, -1);
258 static int kvm_host_page_count(void *addr)
260 return page_count(virt_to_page(addr));
263 static phys_addr_t kvm_host_pa(void *addr)
268 static void *kvm_host_va(phys_addr_t phys)
273 static void clean_dcache_guest_page(void *va, size_t size)
275 __clean_dcache_guest_page(va, size);
278 static void invalidate_icache_guest_page(void *va, size_t size)
280 __invalidate_icache_guest_page(va, size);
284 * Unmapping vs dcache management:
286 * If a guest maps certain memory pages as uncached, all writes will
287 * bypass the data cache and go directly to RAM. However, the CPUs
288 * can still speculate reads (not writes) and fill cache lines with
291 * Those cache lines will be *clean* cache lines though, so a
292 * clean+invalidate operation is equivalent to an invalidate
293 * operation, because no cache lines are marked dirty.
295 * Those clean cache lines could be filled prior to an uncached write
296 * by the guest, and the cache coherent IO subsystem would therefore
297 * end up writing old data to disk.
299 * This is why right after unmapping a page/section and invalidating
300 * the corresponding TLBs, we flush to make sure the IO subsystem will
301 * never hit in the cache.
303 * This is all avoided on systems that have ARM64_HAS_STAGE2_FWB, as
304 * we then fully enforce cacheability of RAM, no matter what the guest
308 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
309 * @mmu: The KVM stage-2 MMU pointer
310 * @start: The intermediate physical base address of the range to unmap
311 * @size: The size of the area to unmap
312 * @may_block: Whether or not we are permitted to block
314 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
315 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
316 * destroying the VM), otherwise another faulting VCPU may come in and mess
317 * with things behind our backs.
319 static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size,
322 struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
323 phys_addr_t end = start + size;
325 lockdep_assert_held_write(&kvm->mmu_lock);
326 WARN_ON(size & ~PAGE_MASK);
327 WARN_ON(stage2_apply_range(mmu, start, end, kvm_pgtable_stage2_unmap,
331 static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
333 __unmap_stage2_range(mmu, start, size, true);
336 static void stage2_flush_memslot(struct kvm *kvm,
337 struct kvm_memory_slot *memslot)
339 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
340 phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
342 stage2_apply_range_resched(&kvm->arch.mmu, addr, end, kvm_pgtable_stage2_flush);
346 * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
347 * @kvm: The struct kvm pointer
349 * Go through the stage 2 page tables and invalidate any cache lines
350 * backing memory already mapped to the VM.
352 static void stage2_flush_vm(struct kvm *kvm)
354 struct kvm_memslots *slots;
355 struct kvm_memory_slot *memslot;
358 idx = srcu_read_lock(&kvm->srcu);
359 write_lock(&kvm->mmu_lock);
361 slots = kvm_memslots(kvm);
362 kvm_for_each_memslot(memslot, bkt, slots)
363 stage2_flush_memslot(kvm, memslot);
365 write_unlock(&kvm->mmu_lock);
366 srcu_read_unlock(&kvm->srcu, idx);
370 * free_hyp_pgds - free Hyp-mode page tables
372 void __init free_hyp_pgds(void)
374 mutex_lock(&kvm_hyp_pgd_mutex);
376 kvm_pgtable_hyp_destroy(hyp_pgtable);
380 mutex_unlock(&kvm_hyp_pgd_mutex);
383 static bool kvm_host_owns_hyp_mappings(void)
385 if (is_kernel_in_hyp_mode())
388 if (static_branch_likely(&kvm_protected_mode_initialized))
392 * This can happen at boot time when __create_hyp_mappings() is called
393 * after the hyp protection has been enabled, but the static key has
394 * not been flipped yet.
396 if (!hyp_pgtable && is_protected_kvm_enabled())
399 WARN_ON(!hyp_pgtable);
404 int __create_hyp_mappings(unsigned long start, unsigned long size,
405 unsigned long phys, enum kvm_pgtable_prot prot)
409 if (WARN_ON(!kvm_host_owns_hyp_mappings()))
412 mutex_lock(&kvm_hyp_pgd_mutex);
413 err = kvm_pgtable_hyp_map(hyp_pgtable, start, size, phys, prot);
414 mutex_unlock(&kvm_hyp_pgd_mutex);
419 static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
421 if (!is_vmalloc_addr(kaddr)) {
422 BUG_ON(!virt_addr_valid(kaddr));
425 return page_to_phys(vmalloc_to_page(kaddr)) +
426 offset_in_page(kaddr);
430 struct hyp_shared_pfn {
436 static DEFINE_MUTEX(hyp_shared_pfns_lock);
437 static struct rb_root hyp_shared_pfns = RB_ROOT;
439 static struct hyp_shared_pfn *find_shared_pfn(u64 pfn, struct rb_node ***node,
440 struct rb_node **parent)
442 struct hyp_shared_pfn *this;
444 *node = &hyp_shared_pfns.rb_node;
447 this = container_of(**node, struct hyp_shared_pfn, node);
450 *node = &((**node)->rb_left);
451 else if (this->pfn > pfn)
452 *node = &((**node)->rb_right);
460 static int share_pfn_hyp(u64 pfn)
462 struct rb_node **node, *parent;
463 struct hyp_shared_pfn *this;
466 mutex_lock(&hyp_shared_pfns_lock);
467 this = find_shared_pfn(pfn, &node, &parent);
473 this = kzalloc(sizeof(*this), GFP_KERNEL);
481 rb_link_node(&this->node, parent, node);
482 rb_insert_color(&this->node, &hyp_shared_pfns);
483 ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp, pfn, 1);
485 mutex_unlock(&hyp_shared_pfns_lock);
490 static int unshare_pfn_hyp(u64 pfn)
492 struct rb_node **node, *parent;
493 struct hyp_shared_pfn *this;
496 mutex_lock(&hyp_shared_pfns_lock);
497 this = find_shared_pfn(pfn, &node, &parent);
498 if (WARN_ON(!this)) {
507 rb_erase(&this->node, &hyp_shared_pfns);
509 ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_hyp, pfn, 1);
511 mutex_unlock(&hyp_shared_pfns_lock);
516 int kvm_share_hyp(void *from, void *to)
518 phys_addr_t start, end, cur;
522 if (is_kernel_in_hyp_mode())
526 * The share hcall maps things in the 'fixed-offset' region of the hyp
527 * VA space, so we can only share physically contiguous data-structures
530 if (is_vmalloc_or_module_addr(from) || is_vmalloc_or_module_addr(to))
533 if (kvm_host_owns_hyp_mappings())
534 return create_hyp_mappings(from, to, PAGE_HYP);
536 start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
537 end = PAGE_ALIGN(__pa(to));
538 for (cur = start; cur < end; cur += PAGE_SIZE) {
539 pfn = __phys_to_pfn(cur);
540 ret = share_pfn_hyp(pfn);
548 void kvm_unshare_hyp(void *from, void *to)
550 phys_addr_t start, end, cur;
553 if (is_kernel_in_hyp_mode() || kvm_host_owns_hyp_mappings() || !from)
556 start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
557 end = PAGE_ALIGN(__pa(to));
558 for (cur = start; cur < end; cur += PAGE_SIZE) {
559 pfn = __phys_to_pfn(cur);
560 WARN_ON(unshare_pfn_hyp(pfn));
565 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
566 * @from: The virtual kernel start address of the range
567 * @to: The virtual kernel end address of the range (exclusive)
568 * @prot: The protection to be applied to this range
570 * The same virtual address as the kernel virtual address is also used
571 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
574 int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
576 phys_addr_t phys_addr;
577 unsigned long virt_addr;
578 unsigned long start = kern_hyp_va((unsigned long)from);
579 unsigned long end = kern_hyp_va((unsigned long)to);
581 if (is_kernel_in_hyp_mode())
584 if (!kvm_host_owns_hyp_mappings())
587 start = start & PAGE_MASK;
588 end = PAGE_ALIGN(end);
590 for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
593 phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
594 err = __create_hyp_mappings(virt_addr, PAGE_SIZE, phys_addr,
603 static int __hyp_alloc_private_va_range(unsigned long base)
605 lockdep_assert_held(&kvm_hyp_pgd_mutex);
607 if (!PAGE_ALIGNED(base))
611 * Verify that BIT(VA_BITS - 1) hasn't been flipped by
612 * allocating the new area, as it would indicate we've
613 * overflowed the idmap/IO address range.
615 if ((base ^ io_map_base) & BIT(VA_BITS - 1))
624 * hyp_alloc_private_va_range - Allocates a private VA range.
625 * @size: The size of the VA range to reserve.
626 * @haddr: The hypervisor virtual start address of the allocation.
628 * The private virtual address (VA) range is allocated below io_map_base
629 * and aligned based on the order of @size.
631 * Return: 0 on success or negative error code on failure.
633 int hyp_alloc_private_va_range(size_t size, unsigned long *haddr)
638 mutex_lock(&kvm_hyp_pgd_mutex);
641 * This assumes that we have enough space below the idmap
642 * page to allocate our VAs. If not, the check in
643 * __hyp_alloc_private_va_range() will kick in. A potential
644 * alternative would be to detect that overflow and switch
645 * to an allocation above the idmap.
647 * The allocated size is always a multiple of PAGE_SIZE.
649 size = PAGE_ALIGN(size);
650 base = io_map_base - size;
651 ret = __hyp_alloc_private_va_range(base);
653 mutex_unlock(&kvm_hyp_pgd_mutex);
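/*
 * For instance, a 9KiB request is rounded up to 3 pages (12KiB with 4K
 * pages) and placed at io_map_base - 12KiB; io_map_base is expected to move
 * down with it, so successive private-VA allocations land below earlier
 * ones, growing downwards from just below the idmap/IO region.
 */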
661 static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
662 unsigned long *haddr,
663 enum kvm_pgtable_prot prot)
668 if (!kvm_host_owns_hyp_mappings()) {
669 addr = kvm_call_hyp_nvhe(__pkvm_create_private_mapping,
670 phys_addr, size, prot);
671 if (IS_ERR_VALUE(addr))
678 size = PAGE_ALIGN(size + offset_in_page(phys_addr));
679 ret = hyp_alloc_private_va_range(size, &addr);
683 ret = __create_hyp_mappings(addr, size, phys_addr, prot);
687 *haddr = addr + offset_in_page(phys_addr);
691 int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr)
697 mutex_lock(&kvm_hyp_pgd_mutex);
699 * Efficient stack verification using the PAGE_SHIFT bit implies
700 * an alignment of our allocation on the order of the size.
702 size = PAGE_SIZE * 2;
703 base = ALIGN_DOWN(io_map_base - size, size);
705 ret = __hyp_alloc_private_va_range(base);
707 mutex_unlock(&kvm_hyp_pgd_mutex);
710 kvm_err("Cannot allocate hyp stack guard page\n");
715 * Since the stack grows downwards, map the stack to the page
716 * at the higher address and leave the lower guard page
719 * Any valid stack address now has the PAGE_SHIFT bit as 1
720 * and addresses corresponding to the guard page have the
721 * PAGE_SHIFT bit as 0 - this is used for overflow detection.
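 *
 * For illustration (assuming 4K pages, PAGE_SHIFT = 12): base is aligned to
 * 2 * PAGE_SIZE, so bit 12 of base is clear. Every address in the guard page
 * [base, base + 0x1000) has bit 12 clear, while every address in the stack
 * page [base + 0x1000, base + 0x2000) has bit 12 set, so a stack pointer
 * that underflows into the guard page can be spotted with a single bit test.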
723 ret = __create_hyp_mappings(base + PAGE_SIZE, PAGE_SIZE, phys_addr,
726 kvm_err("Cannot map hyp stack\n");
728 *haddr = base + size;
734 * create_hyp_io_mappings - Map IO into both kernel and HYP
735 * @phys_addr: The physical start address which gets mapped
736 * @size: Size of the region being mapped
737 * @kaddr: Kernel VA for this mapping
738 * @haddr: HYP VA for this mapping
740 int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
741 void __iomem **kaddr,
742 void __iomem **haddr)
747 if (is_protected_kvm_enabled())
750 *kaddr = ioremap(phys_addr, size);
754 if (is_kernel_in_hyp_mode()) {
759 ret = __create_hyp_private_mapping(phys_addr, size,
760 &addr, PAGE_HYP_DEVICE);
768 *haddr = (void __iomem *)addr;
773 * create_hyp_exec_mappings - Map an executable range into HYP
774 * @phys_addr: The physical start address which gets mapped
775 * @size: Size of the region being mapped
776 * @haddr: HYP VA for this mapping
778 int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
784 BUG_ON(is_kernel_in_hyp_mode());
786 ret = __create_hyp_private_mapping(phys_addr, size,
787 &addr, PAGE_HYP_EXEC);
793 *haddr = (void *)addr;
797 static struct kvm_pgtable_mm_ops kvm_user_mm_ops = {
798 /* We shouldn't need any other callback to walk the PT */
799 .phys_to_virt = kvm_host_va,
802 static int get_user_mapping_size(struct kvm *kvm, u64 addr)
804 struct kvm_pgtable pgt = {
805 .pgd = (kvm_pteref_t)kvm->mm->pgd,
806 .ia_bits = vabits_actual,
807 .start_level = (KVM_PGTABLE_MAX_LEVELS -
808 CONFIG_PGTABLE_LEVELS),
809 .mm_ops = &kvm_user_mm_ops,
812 kvm_pte_t pte = 0; /* Keep GCC quiet... */
817 * Disable IRQs so that we hazard against a concurrent
818 * teardown of the userspace page tables (which relies on
821 local_irq_save(flags);
822 ret = kvm_pgtable_get_leaf(&pgt, addr, &pte, &level);
823 local_irq_restore(flags);
829 * Not seeing an error, but not updating level? Something went
832 if (WARN_ON(level >= KVM_PGTABLE_MAX_LEVELS))
835 /* Oops, the userspace PTs are gone... Replay the fault */
836 if (!kvm_pte_valid(pte))
839 return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level));
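/*
 * For example (assuming 4K pages), a leaf found at level 2 gives
 * ARM64_HW_PGTABLE_LEVEL_SHIFT(2) = 21, so the function reports a 2MiB
 * mapping, i.e. the hva is backed by a PMD-sized block (or THP) in the
 * userspace page tables.
 */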
842 static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
843 .zalloc_page = stage2_memcache_zalloc_page,
844 .zalloc_pages_exact = kvm_s2_zalloc_pages_exact,
845 .free_pages_exact = kvm_s2_free_pages_exact,
846 .free_unlinked_table = stage2_free_unlinked_table,
847 .get_page = kvm_host_get_page,
848 .put_page = kvm_s2_put_page,
849 .page_count = kvm_host_page_count,
850 .phys_to_virt = kvm_host_va,
851 .virt_to_phys = kvm_host_pa,
852 .dcache_clean_inval_poc = clean_dcache_guest_page,
853 .icache_inval_pou = invalidate_icache_guest_page,
857 * kvm_init_stage2_mmu - Initialise a S2 MMU structure
858 * @kvm: The pointer to the KVM structure
859 * @mmu: The pointer to the s2 MMU structure
860 * @type: The machine type of the virtual machine
862 * Allocates only the stage-2 HW PGD level table(s).
863 * Note we don't need locking here as this is only called when the VM is
864 * created, which can only be done once.
866 int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type)
868 u32 kvm_ipa_limit = get_kvm_ipa_limit();
870 struct kvm_pgtable *pgt;
874 if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
877 phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
878 if (is_protected_kvm_enabled()) {
879 phys_shift = kvm_ipa_limit;
880 } else if (phys_shift) {
881 if (phys_shift > kvm_ipa_limit ||
882 phys_shift < ARM64_MIN_PARANGE_BITS)
885 phys_shift = KVM_PHYS_SHIFT;
886 if (phys_shift > kvm_ipa_limit) {
887 pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n",
893 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
894 mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
895 mmu->vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);
897 if (mmu->pgt != NULL) {
898 kvm_err("kvm_arch already initialized?\n");
902 pgt = kzalloc(sizeof(*pgt), GFP_KERNEL_ACCOUNT);
906 mmu->arch = &kvm->arch;
907 err = kvm_pgtable_stage2_init(pgt, mmu, &kvm_s2_mm_ops);
909 goto out_free_pgtable;
911 mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran));
912 if (!mmu->last_vcpu_ran) {
914 goto out_destroy_pgtable;
917 for_each_possible_cpu(cpu)
918 *per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;
920 /* The eager page splitting is disabled by default */
921 mmu->split_page_chunk_size = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT;
922 mmu->split_page_cache.gfp_zero = __GFP_ZERO;
925 mmu->pgd_phys = __pa(pgt->pgd);
929 kvm_pgtable_stage2_destroy(pgt);
935 void kvm_uninit_stage2_mmu(struct kvm *kvm)
937 kvm_free_stage2_pgd(&kvm->arch.mmu);
938 kvm_mmu_free_memory_cache(&kvm->arch.mmu.split_page_cache);
941 static void stage2_unmap_memslot(struct kvm *kvm,
942 struct kvm_memory_slot *memslot)
944 hva_t hva = memslot->userspace_addr;
945 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
946 phys_addr_t size = PAGE_SIZE * memslot->npages;
947 hva_t reg_end = hva + size;
950 * A memory region could potentially cover multiple VMAs, and any holes
951 * between them, so iterate over all of them to find out if we should
954 * +--------------------------------------------+
955 * +---------------+----------------+ +----------------+
956 * | : VMA 1 | VMA 2 | | VMA 3 : |
957 * +---------------+----------------+ +----------------+
959 * +--------------------------------------------+
962 struct vm_area_struct *vma;
963 hva_t vm_start, vm_end;
965 vma = find_vma_intersection(current->mm, hva, reg_end);
970 * Take the intersection of this VMA with the memory region
972 vm_start = max(hva, vma->vm_start);
973 vm_end = min(reg_end, vma->vm_end);
975 if (!(vma->vm_flags & VM_PFNMAP)) {
976 gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
977 unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start);
980 } while (hva < reg_end);
984 * stage2_unmap_vm - Unmap Stage-2 RAM mappings
985 * @kvm: The struct kvm pointer
987 * Go through the memregions and unmap any regular RAM
988 * backing memory already mapped to the VM.
990 void stage2_unmap_vm(struct kvm *kvm)
992 struct kvm_memslots *slots;
993 struct kvm_memory_slot *memslot;
996 idx = srcu_read_lock(&kvm->srcu);
997 mmap_read_lock(current->mm);
998 write_lock(&kvm->mmu_lock);
1000 slots = kvm_memslots(kvm);
1001 kvm_for_each_memslot(memslot, bkt, slots)
1002 stage2_unmap_memslot(kvm, memslot);
1004 write_unlock(&kvm->mmu_lock);
1005 mmap_read_unlock(current->mm);
1006 srcu_read_unlock(&kvm->srcu, idx);
1009 void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
1011 struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
1012 struct kvm_pgtable *pgt = NULL;
1014 write_lock(&kvm->mmu_lock);
1019 free_percpu(mmu->last_vcpu_ran);
1021 write_unlock(&kvm->mmu_lock);
1024 kvm_pgtable_stage2_destroy(pgt);
1029 static void hyp_mc_free_fn(void *addr, void *unused)
1031 free_page((unsigned long)addr);
1034 static void *hyp_mc_alloc_fn(void *unused)
1036 return (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
1039 void free_hyp_memcache(struct kvm_hyp_memcache *mc)
1041 if (is_protected_kvm_enabled())
1042 __free_hyp_memcache(mc, hyp_mc_free_fn,
1046 int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages)
1048 if (!is_protected_kvm_enabled())
1051 return __topup_hyp_memcache(mc, min_pages, hyp_mc_alloc_fn,
1056 * kvm_phys_addr_ioremap - map a device range to guest IPA
1058 * @kvm: The KVM pointer
1059 * @guest_ipa: The IPA at which to insert the mapping
1060 * @pa: The physical address of the device
1061 * @size: The size of the mapping
1062 * @writable: Whether or not to create a writable mapping
1064 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
1065 phys_addr_t pa, unsigned long size, bool writable)
1069 struct kvm_mmu_memory_cache cache = { .gfp_zero = __GFP_ZERO };
1070 struct kvm_s2_mmu *mmu = &kvm->arch.mmu;
1071 struct kvm_pgtable *pgt = mmu->pgt;
1072 enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_DEVICE |
1073 KVM_PGTABLE_PROT_R |
1074 (writable ? KVM_PGTABLE_PROT_W : 0);
1076 if (is_protected_kvm_enabled())
1079 size += offset_in_page(guest_ipa);
1080 guest_ipa &= PAGE_MASK;
1082 for (addr = guest_ipa; addr < guest_ipa + size; addr += PAGE_SIZE) {
1083 ret = kvm_mmu_topup_memory_cache(&cache,
1084 kvm_mmu_cache_min_pages(mmu));
1088 write_lock(&kvm->mmu_lock);
1089 ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot,
1091 write_unlock(&kvm->mmu_lock);
1098 kvm_mmu_free_memory_cache(&cache);
1103 * stage2_wp_range() - write protect stage2 memory region range
1104 * @mmu: The KVM stage-2 MMU pointer
1105 * @addr: Start address of range
1106 * @end: End address of range
1108 static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
1110 stage2_apply_range_resched(mmu, addr, end, kvm_pgtable_stage2_wrprotect);
1114 * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
1115 * @kvm: The KVM pointer
1116 * @slot: The memory slot to write protect
1118 * Called to start logging dirty pages after the KVM_MEM_LOG_DIRTY_PAGES
1119 * flag is set on a memory region. After this function returns
1120 * all present PUDs, PMDs and PTEs are write protected in the memory region.
1121 * Afterwards read of dirty page log can be called.
1123 * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
1124 * serializing operations for VM memory regions.
1126 static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
1128 struct kvm_memslots *slots = kvm_memslots(kvm);
1129 struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
1130 phys_addr_t start, end;
1132 if (WARN_ON_ONCE(!memslot))
1135 start = memslot->base_gfn << PAGE_SHIFT;
1136 end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
1138 write_lock(&kvm->mmu_lock);
1139 stage2_wp_range(&kvm->arch.mmu, start, end);
1140 write_unlock(&kvm->mmu_lock);
1141 kvm_flush_remote_tlbs_memslot(kvm, memslot);
1145 * kvm_mmu_split_memory_region() - split the stage 2 blocks into PAGE_SIZE
1146 * pages for memory slot
1147 * @kvm: The KVM pointer
1148 * @slot: The memory slot to split
1150 * Acquires kvm->mmu_lock. Called with kvm->slots_lock mutex acquired,
1151 * serializing operations for VM memory regions.
1153 static void kvm_mmu_split_memory_region(struct kvm *kvm, int slot)
1155 struct kvm_memslots *slots;
1156 struct kvm_memory_slot *memslot;
1157 phys_addr_t start, end;
1159 lockdep_assert_held(&kvm->slots_lock);
1161 slots = kvm_memslots(kvm);
1162 memslot = id_to_memslot(slots, slot);
1164 start = memslot->base_gfn << PAGE_SHIFT;
1165 end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
1167 write_lock(&kvm->mmu_lock);
1168 kvm_mmu_split_huge_pages(kvm, start, end);
1169 write_unlock(&kvm->mmu_lock);
1173 * kvm_arch_mmu_enable_log_dirty_pt_masked() - enable dirty logging for selected pages.
1174 * @kvm: The KVM pointer
1175 * @slot: The memory slot associated with mask
1176 * @gfn_offset: The gfn offset in memory slot
1177 * @mask: The mask of pages at offset 'gfn_offset' in this memory
1178 * slot to enable dirty logging on
1180 * Write-protects selected pages to enable dirty logging, and then
1181 * splits them to PAGE_SIZE. Caller must acquire kvm->mmu_lock.
1183 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1184 struct kvm_memory_slot *slot,
1185 gfn_t gfn_offset, unsigned long mask)
1187 phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
1188 phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
1189 phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
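/*
 * For example, gfn_offset = 0 and mask = 0x30 select gfns base_gfn + 4 and
 * base_gfn + 5: __ffs(0x30) = 4 and __fls(0x30) = 5, so start/end cover
 * exactly those two pages.
 */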
1191 lockdep_assert_held_write(&kvm->mmu_lock);
1193 stage2_wp_range(&kvm->arch.mmu, start, end);
1196 * Eager-splitting is done when manual-protect is set. We
1197 * also check for initially-all-set because we can avoid
1198 * eager-splitting if initially-all-set is false.
1199 * Initially-all-set being false implies that huge-pages were
1200 * already split when enabling dirty logging: no need to do it
1203 if (kvm_dirty_log_manual_protect_and_init_set(kvm))
1204 kvm_mmu_split_huge_pages(kvm, start, end);
1207 static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
1209 send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
1212 static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
1214 unsigned long map_size)
1217 hva_t uaddr_start, uaddr_end;
1220 /* The memslot and the VMA are guaranteed to be aligned to PAGE_SIZE */
1221 if (map_size == PAGE_SIZE)
1224 size = memslot->npages * PAGE_SIZE;
1226 gpa_start = memslot->base_gfn << PAGE_SHIFT;
1228 uaddr_start = memslot->userspace_addr;
1229 uaddr_end = uaddr_start + size;
1232 * Pages belonging to memslots that don't have the same alignment
1233 * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2
1234 * PMD/PUD entries, because we'll end up mapping the wrong pages.
1236 * Consider a layout like the following:
1238 * memslot->userspace_addr:
1239 * +-----+--------------------+--------------------+---+
1240 * |abcde|fgh Stage-1 block | Stage-1 block tv|xyz|
1241 * +-----+--------------------+--------------------+---+
1243 * memslot->base_gfn << PAGE_SHIFT:
1244 * +---+--------------------+--------------------+-----+
1245 * |abc|def Stage-2 block | Stage-2 block |tvxyz|
1246 * +---+--------------------+--------------------+-----+
1248 * If we create those stage-2 blocks, we'll end up with this incorrect
1254 if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
1258 * Next, let's make sure we're not trying to map anything not covered
1259 * by the memslot. This means we have to prohibit block size mappings
1260 * for the beginning and end of a non-block aligned and non-block sized
1261 * memory slot (illustrated by the head and tail parts of the
1262 * userspace view above containing pages 'abcde' and 'xyz',
1265 * Note that it doesn't matter if we do the check using the
1266 * userspace_addr or the base_gfn, as both are equally aligned (per
1267 * the check above) and equally sized.
1269 return (hva & ~(map_size - 1)) >= uaddr_start &&
1270 (hva & ~(map_size - 1)) + map_size <= uaddr_end;
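/*
 * Concretely, for a candidate 2MiB (PMD_SIZE) mapping: if uaddr_start =
 * 0x40200000 but gpa_start = 0x80100000, their low 21 bits differ (0x0 vs
 * 0x100000), so a stage-2 block would end up mapping the wrong pages (see
 * the diagram above) and the function returns false. The final check
 * additionally rejects blocks whose 2MiB-aligned start or end would spill
 * outside [uaddr_start, uaddr_end).
 */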
1274 * Check if the given hva is backed by a transparent huge page (THP) and
1275 * whether it can be mapped using block mapping in stage2. If so, adjust
1276 * the stage2 PFN and IPA accordingly. Only PMD_SIZE THPs are currently
1277 * supported. This will need to be updated to support other THP sizes.
1279 * Returns the size of the mapping.
1282 transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
1283 unsigned long hva, kvm_pfn_t *pfnp,
1286 kvm_pfn_t pfn = *pfnp;
1289 * Make sure the adjustment is done only for THP pages. Also make
1290 * sure that the HVA and IPA are sufficiently aligned and that the
1291 * block map is contained within the memslot.
1293 if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
1294 int sz = get_user_mapping_size(kvm, hva);
1303 pfn &= ~(PTRS_PER_PMD - 1);
1309 /* Use page mapping if we cannot use block mapping. */
1313 static int get_vma_page_shift(struct vm_area_struct *vma, unsigned long hva)
1317 if (is_vm_hugetlb_page(vma) && !(vma->vm_flags & VM_PFNMAP))
1318 return huge_page_shift(hstate_vma(vma));
1320 if (!(vma->vm_flags & VM_PFNMAP))
1323 VM_BUG_ON(is_vm_hugetlb_page(vma));
1325 pa = (vma->vm_pgoff << PAGE_SHIFT) + (hva - vma->vm_start);
1327 #ifndef __PAGETABLE_PMD_FOLDED
1328 if ((hva & (PUD_SIZE - 1)) == (pa & (PUD_SIZE - 1)) &&
1329 ALIGN_DOWN(hva, PUD_SIZE) >= vma->vm_start &&
1330 ALIGN(hva, PUD_SIZE) <= vma->vm_end)
1334 if ((hva & (PMD_SIZE - 1)) == (pa & (PMD_SIZE - 1)) &&
1335 ALIGN_DOWN(hva, PMD_SIZE) >= vma->vm_start &&
1336 ALIGN(hva, PMD_SIZE) <= vma->vm_end)
1343 * The page will be mapped in stage 2 as Normal Cacheable, so the VM will be
1344 * able to see the page's tags and therefore they must be initialised first. If
1345 * PG_mte_tagged is set, tags have already been initialised.
1347 * The race in the test/set of the PG_mte_tagged flag is handled by:
1348 * - preventing VM_SHARED mappings in a memslot with MTE preventing two VMs
1349 * racing to sanitise the same page
1350 * - mmap_lock protects between a VM faulting a page in and the VMM performing
1351 * an mprotect() to add VM_MTE
1353 static void sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
1356 unsigned long i, nr_pages = size >> PAGE_SHIFT;
1357 struct page *page = pfn_to_page(pfn);
1359 if (!kvm_has_mte(kvm))
1362 for (i = 0; i < nr_pages; i++, page++) {
1363 if (try_page_mte_tagging(page)) {
1364 mte_clear_page_tags(page_address(page));
1365 set_page_mte_tagged(page);
1370 static bool kvm_vma_mte_allowed(struct vm_area_struct *vma)
1372 return vma->vm_flags & VM_MTE_ALLOWED;
1375 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1376 struct kvm_memory_slot *memslot, unsigned long hva,
1377 unsigned long fault_status)
1380 bool write_fault, writable, force_pte = false;
1381 bool exec_fault, mte_allowed;
1382 bool device = false;
1383 unsigned long mmu_seq;
1384 struct kvm *kvm = vcpu->kvm;
1385 struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
1386 struct vm_area_struct *vma;
1390 bool logging_active = memslot_is_logging(memslot);
1391 unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
1392 long vma_pagesize, fault_granule;
1393 enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
1394 struct kvm_pgtable *pgt;
1396 fault_granule = 1UL << ARM64_HW_PGTABLE_LEVEL_SHIFT(fault_level);
1397 write_fault = kvm_is_write_fault(vcpu);
1398 exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
1399 VM_BUG_ON(write_fault && exec_fault);
1401 if (fault_status == ESR_ELx_FSC_PERM && !write_fault && !exec_fault) {
1402 kvm_err("Unexpected L2 read permission error\n");
1407 * Permission faults just need to update the existing leaf entry,
1408 * and so normally don't require allocations from the memcache. The
1409 * only exception to this is when dirty logging is enabled at runtime
1410 * and a write fault needs to collapse a block entry into a table.
1412 if (fault_status != ESR_ELx_FSC_PERM ||
1413 (logging_active && write_fault)) {
1414 ret = kvm_mmu_topup_memory_cache(memcache,
1415 kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu));
1421 * Let's check if we will get back a huge page backed by hugetlbfs, or
1422 * get a block mapping for a device MMIO region.
1424 mmap_read_lock(current->mm);
1425 vma = vma_lookup(current->mm, hva);
1426 if (unlikely(!vma)) {
1427 kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
1428 mmap_read_unlock(current->mm);
1433 * logging_active is guaranteed to never be true for VM_PFNMAP
1436 if (logging_active) {
1438 vma_shift = PAGE_SHIFT;
1440 vma_shift = get_vma_page_shift(vma, hva);
1443 switch (vma_shift) {
1444 #ifndef __PAGETABLE_PMD_FOLDED
1446 if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
1450 case CONT_PMD_SHIFT:
1451 vma_shift = PMD_SHIFT;
1454 if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
1457 case CONT_PTE_SHIFT:
1458 vma_shift = PAGE_SHIFT;
1464 WARN_ONCE(1, "Unknown vma_shift %d", vma_shift);
1467 vma_pagesize = 1UL << vma_shift;
1468 if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
1469 fault_ipa &= ~(vma_pagesize - 1);
1471 gfn = fault_ipa >> PAGE_SHIFT;
1472 mte_allowed = kvm_vma_mte_allowed(vma);
1474 /* Don't use the VMA after the unlock -- it may have vanished */
1478 * Read mmu_invalidate_seq so that KVM can detect if the results of
1479 * vma_lookup() or __gfn_to_pfn_memslot() become stale prior to
1480 * acquiring kvm->mmu_lock.
1482 * Rely on mmap_read_unlock() for an implicit smp_rmb(), which pairs
1483 * with the smp_wmb() in kvm_mmu_invalidate_end().
1485 mmu_seq = vcpu->kvm->mmu_invalidate_seq;
1486 mmap_read_unlock(current->mm);
1488 pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
1489 write_fault, &writable, NULL);
1490 if (pfn == KVM_PFN_ERR_HWPOISON) {
1491 kvm_send_hwpoison_signal(hva, vma_shift);
1494 if (is_error_noslot_pfn(pfn))
1497 if (kvm_is_device_pfn(pfn)) {
1499 * If the page was identified as device early by looking at
1500 * the VMA flags, vma_pagesize is already representing the
1501 * largest quantity we can map. If instead it was mapped
1502 * via __gfn_to_pfn_memslot(), vma_pagesize is set to PAGE_SIZE
1503 * and must not be upgraded.
1505 * In both cases, we don't let transparent_hugepage_adjust()
1506 * change things at the last minute.
1509 } else if (logging_active && !write_fault) {
1511 * Only actually map the page as writable if this was a write
1517 if (exec_fault && device)
1520 read_lock(&kvm->mmu_lock);
1521 pgt = vcpu->arch.hw_mmu->pgt;
1522 if (mmu_invalidate_retry(kvm, mmu_seq))
1526 * If we are not forced to use page mapping, check if we are
1527 * backed by a THP and thus use block mapping if possible.
1529 if (vma_pagesize == PAGE_SIZE && !(force_pte || device)) {
1530 if (fault_status == ESR_ELx_FSC_PERM &&
1531 fault_granule > PAGE_SIZE)
1532 vma_pagesize = fault_granule;
1534 vma_pagesize = transparent_hugepage_adjust(kvm, memslot,
1538 if (vma_pagesize < 0) {
1544 if (fault_status != ESR_ELx_FSC_PERM && !device && kvm_has_mte(kvm)) {
1545 /* Check the VMM hasn't introduced a new disallowed VMA */
1547 sanitise_mte_tags(kvm, pfn, vma_pagesize);
1555 prot |= KVM_PGTABLE_PROT_W;
1558 prot |= KVM_PGTABLE_PROT_X;
1561 prot |= KVM_PGTABLE_PROT_DEVICE;
1562 else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC))
1563 prot |= KVM_PGTABLE_PROT_X;
1566 * When handling an FSC_PERM fault, we only need to relax
1567 * permissions if vma_pagesize equals fault_granule. Otherwise,
1568 * kvm_pgtable_stage2_map() should be called to change block size.
1570 if (fault_status == ESR_ELx_FSC_PERM && vma_pagesize == fault_granule)
1571 ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
1573 ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
1574 __pfn_to_phys(pfn), prot,
1576 KVM_PGTABLE_WALK_HANDLE_FAULT |
1577 KVM_PGTABLE_WALK_SHARED);
1579 /* Mark the page dirty only if the fault is handled successfully */
1580 if (writable && !ret) {
1581 kvm_set_pfn_dirty(pfn);
1582 mark_page_dirty_in_slot(kvm, memslot, gfn);
1586 read_unlock(&kvm->mmu_lock);
1587 kvm_release_pfn_clean(pfn);
1588 return ret != -EAGAIN ? ret : 0;
1591 /* Resolve the access fault by making the page young again. */
1592 static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
1595 struct kvm_s2_mmu *mmu;
1597 trace_kvm_access_fault(fault_ipa);
1599 read_lock(&vcpu->kvm->mmu_lock);
1600 mmu = vcpu->arch.hw_mmu;
1601 pte = kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa);
1602 read_unlock(&vcpu->kvm->mmu_lock);
1604 if (kvm_pte_valid(pte))
1605 kvm_set_pfn_accessed(kvm_pte_to_pfn(pte));
1609 * kvm_handle_guest_abort - handles all 2nd stage aborts
1610 * @vcpu: the VCPU pointer
1612 * Any abort that gets to the host is almost guaranteed to be caused by a
1613 * missing second stage translation table entry, which can mean that either the
1614 * guest simply needs more memory and we must allocate an appropriate page or it
1615 * can mean that the guest tried to access I/O memory, which is emulated by user
1616 * space. The distinction is based on the IPA causing the fault and whether this
1617 * memory region has been registered as standard RAM by user space.
1619 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
1621 unsigned long fault_status;
1622 phys_addr_t fault_ipa;
1623 struct kvm_memory_slot *memslot;
1625 bool is_iabt, write_fault, writable;
1629 fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
1631 fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
1632 is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
1634 if (fault_status == ESR_ELx_FSC_FAULT) {
1635 /* Beyond sanitised PARange (which is the IPA limit) */
1636 if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) {
1637 kvm_inject_size_fault(vcpu);
1641 /* Falls between the IPA range and the PARange? */
1642 if (fault_ipa >= BIT_ULL(vcpu->arch.hw_mmu->pgt->ia_bits)) {
1643 fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
1646 kvm_inject_pabt(vcpu, fault_ipa);
1648 kvm_inject_dabt(vcpu, fault_ipa);
1653 /* Synchronous External Abort? */
1654 if (kvm_vcpu_abt_issea(vcpu)) {
1656 * For RAS the host kernel may handle this abort.
1657 * There is no need to pass the error into the guest.
1659 if (kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu)))
1660 kvm_inject_vabt(vcpu);
1665 trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
1666 kvm_vcpu_get_hfar(vcpu), fault_ipa);
1668 /* Check that the stage-2 fault is a translation, permission or access fault */
1669 if (fault_status != ESR_ELx_FSC_FAULT &&
1670 fault_status != ESR_ELx_FSC_PERM &&
1671 fault_status != ESR_ELx_FSC_ACCESS) {
1672 kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
1673 kvm_vcpu_trap_get_class(vcpu),
1674 (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
1675 (unsigned long)kvm_vcpu_get_esr(vcpu));
1679 idx = srcu_read_lock(&vcpu->kvm->srcu);
1681 gfn = fault_ipa >> PAGE_SHIFT;
1682 memslot = gfn_to_memslot(vcpu->kvm, gfn);
1683 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
1684 write_fault = kvm_is_write_fault(vcpu);
1685 if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
1687 * The guest has put either its instructions or its page-tables
1688 * somewhere it shouldn't have. Userspace won't be able to do
1689 * anything about this (there's no syndrome for a start), so
1690 * re-inject the abort back into the guest.
1697 if (kvm_vcpu_abt_iss1tw(vcpu)) {
1698 kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
1704 * Check for a cache maintenance operation. Since we
1705 * ended-up here, we know it is outside of any memory
1706 * slot. But we can't find out if that is for a device,
1707 * or if the guest is just being stupid. The only thing
1708 * we know for sure is that this range cannot be cached.
1710 * So let's assume that the guest is just being
1711 * cautious, and skip the instruction.
1713 if (kvm_is_error_hva(hva) && kvm_vcpu_dabt_is_cm(vcpu)) {
1720 * The IPA is reported as [MAX:12], so we need to
1721 * complement it with the bottom 12 bits from the
1722 * faulting VA. This is always 12 bits, irrespective
1725 fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
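/*
 * For example, a fault_ipa of 0x8abc000 combined with a faulting VA whose
 * low 12 bits are 0x123 yields 0x8abc123 as the precise address handed to
 * MMIO emulation.
 */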
1726 ret = io_mem_abort(vcpu, fault_ipa);
1730 /* Userspace should not be able to register out-of-bounds IPAs */
1731 VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->arch.hw_mmu));
1733 if (fault_status == ESR_ELx_FSC_ACCESS) {
1734 handle_access_fault(vcpu, fault_ipa);
1739 ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
1743 if (ret == -ENOEXEC) {
1744 kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
1748 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1752 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1754 if (!kvm->arch.mmu.pgt)
1757 __unmap_stage2_range(&kvm->arch.mmu, range->start << PAGE_SHIFT,
1758 (range->end - range->start) << PAGE_SHIFT,
1764 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1766 kvm_pfn_t pfn = pte_pfn(range->arg.pte);
1768 if (!kvm->arch.mmu.pgt)
1771 WARN_ON(range->end - range->start != 1);
1774 * If the page isn't tagged, defer to user_mem_abort() for sanitising
1775 * the MTE tags. The S2 pte should have been unmapped by
1776 * mmu_notifier_invalidate_range_end().
1778 if (kvm_has_mte(kvm) && !page_mte_tagged(pfn_to_page(pfn)))
1782 * We've moved a page around, probably through CoW, so let's treat
1783 * it just like a translation fault and the map handler will clean
1784 * the cache to the PoC.
1786 * The MMU notifiers will have unmapped a huge PMD before calling
1787 * ->change_pte() (which in turn calls kvm_set_spte_gfn()) and
1788 * therefore we never need to clear out a huge PMD through this
1789 * calling path and a memcache is not required.
1791 kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT,
1792 PAGE_SIZE, __pfn_to_phys(pfn),
1793 KVM_PGTABLE_PROT_R, NULL, 0);
1798 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1800 u64 size = (range->end - range->start) << PAGE_SHIFT;
1802 if (!kvm->arch.mmu.pgt)
1805 return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
1806 range->start << PAGE_SHIFT,
1810 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1812 u64 size = (range->end - range->start) << PAGE_SHIFT;
1814 if (!kvm->arch.mmu.pgt)
1817 return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
1818 range->start << PAGE_SHIFT,
1822 phys_addr_t kvm_mmu_get_httbr(void)
1824 return __pa(hyp_pgtable->pgd);
1827 phys_addr_t kvm_get_idmap_vector(void)
1829 return hyp_idmap_vector;
1832 static int kvm_map_idmap_text(void)
1834 unsigned long size = hyp_idmap_end - hyp_idmap_start;
1835 int err = __create_hyp_mappings(hyp_idmap_start, size, hyp_idmap_start,
1838 kvm_err("Failed to idmap %lx-%lx\n",
1839 hyp_idmap_start, hyp_idmap_end);
1844 static void *kvm_hyp_zalloc_page(void *arg)
1846 return (void *)get_zeroed_page(GFP_KERNEL);
1849 static struct kvm_pgtable_mm_ops kvm_hyp_mm_ops = {
1850 .zalloc_page = kvm_hyp_zalloc_page,
1851 .get_page = kvm_host_get_page,
1852 .put_page = kvm_host_put_page,
1853 .phys_to_virt = kvm_host_va,
1854 .virt_to_phys = kvm_host_pa,
1857 int __init kvm_mmu_init(u32 *hyp_va_bits)
1863 hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start);
1864 hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
1865 hyp_idmap_end = __pa_symbol(__hyp_idmap_text_end);
1866 hyp_idmap_end = ALIGN(hyp_idmap_end, PAGE_SIZE);
1867 hyp_idmap_vector = __pa_symbol(__kvm_hyp_init);
1870 * We rely on the linker script to ensure at build time that the HYP
1871 * init code does not cross a page boundary.
1873 BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
1876 * The ID map may be configured to use an extended virtual address
1877 * range. This is only the case if system RAM is out of range for the
1878 * currently configured page size and VA_BITS_MIN, in which case we will
1879 * also need the extended virtual range for the HYP ID map, or we won't
1880 * be able to enable the EL2 MMU.
1882 * However, in some cases the ID map may be configured for fewer than
1883 * the number of VA bits used by the regular kernel stage 1. This
1884 * happens when VA_BITS=52 and the kernel image is placed in PA space
1887 * At EL2, there is only one TTBR register, and we can't switch between
1888 * translation tables *and* update TCR_EL2.T0SZ at the same time. Bottom
1889 * line: we need to use the extended range with *both* our translation
1892 * So use the maximum of the idmap VA bits and the regular kernel stage
1893 * 1 VA bits to assure that the hypervisor can both ID map its code page
1894 * and map any kernel memory.
1896 idmap_bits = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET);
1897 kernel_bits = vabits_actual;
1898 *hyp_va_bits = max(idmap_bits, kernel_bits);
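/*
 * For example, with a 48-bit ID map (T0SZ = 16) and a kernel using 48-bit
 * virtual addresses, idmap_bits = 64 - 16 = 48 and kernel_bits = 48, so the
 * hypervisor stage-1 runs with 48-bit VAs.
 */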
1900 kvm_debug("Using %u-bit virtual addresses at EL2\n", *hyp_va_bits);
1901 kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
1902 kvm_debug("HYP VA range: %lx:%lx\n",
1903 kern_hyp_va(PAGE_OFFSET),
1904 kern_hyp_va((unsigned long)high_memory - 1));
1906 if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
1907 hyp_idmap_start < kern_hyp_va((unsigned long)high_memory - 1) &&
1908 hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
1910 * The idmap page is intersecting with the VA space,
1911 * it is not safe to continue further.
1913 kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
1918 hyp_pgtable = kzalloc(sizeof(*hyp_pgtable), GFP_KERNEL);
1920 kvm_err("Hyp mode page-table not allocated\n");
1925 err = kvm_pgtable_hyp_init(hyp_pgtable, *hyp_va_bits, &kvm_hyp_mm_ops);
1927 goto out_free_pgtable;
1929 err = kvm_map_idmap_text();
1931 goto out_destroy_pgtable;
1933 io_map_base = hyp_idmap_start;
1936 out_destroy_pgtable:
1937 kvm_pgtable_hyp_destroy(hyp_pgtable);
1945 void kvm_arch_commit_memory_region(struct kvm *kvm,
1946 struct kvm_memory_slot *old,
1947 const struct kvm_memory_slot *new,
1948 enum kvm_mr_change change)
1950 bool log_dirty_pages = new && new->flags & KVM_MEM_LOG_DIRTY_PAGES;
1953 * At this point memslot has been committed and there is an
1954 * allocated dirty_bitmap[]; dirty pages will be tracked while the
1955 * memory slot is write protected.
1957 if (log_dirty_pages) {
1959 if (change == KVM_MR_DELETE)
1963 * Huge and normal pages are write-protected and split
1964 * in either of these two cases:
1966 * 1. with initial-all-set: gradually with CLEAR ioctls,
1968 if (kvm_dirty_log_manual_protect_and_init_set(kvm))
1972 * 2. without initial-all-set: all in one shot when
1973 * enabling dirty logging.
1975 kvm_mmu_wp_memory_region(kvm, new->id);
1976 kvm_mmu_split_memory_region(kvm, new->id);
1979 * Free any leftovers from the eager page splitting cache. Do
1980 * this when deleting, moving, disabling dirty logging, or
1981 * creating the memslot (a nop). Doing it for deletes makes
1982 * sure we don't leak memory, and there's no need to keep the
1983 * cache around for any of the other cases.
1985 kvm_mmu_free_memory_cache(&kvm->arch.mmu.split_page_cache);
1989 int kvm_arch_prepare_memory_region(struct kvm *kvm,
1990 const struct kvm_memory_slot *old,
1991 struct kvm_memory_slot *new,
1992 enum kvm_mr_change change)
1997 if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
1998 change != KVM_MR_FLAGS_ONLY)
2002 * Prevent userspace from creating a memory region outside of the IPA
2003 * space addressable by the KVM guest IPA space.
2005 if ((new->base_gfn + new->npages) > (kvm_phys_size(&kvm->arch.mmu) >> PAGE_SHIFT))
2008 hva = new->userspace_addr;
2009 reg_end = hva + (new->npages << PAGE_SHIFT);
2011 mmap_read_lock(current->mm);
2013 * A memory region could potentially cover multiple VMAs, and any holes
2014 * between them, so iterate over all of them.
2016 * +--------------------------------------------+
2017 * +---------------+----------------+ +----------------+
2018 * | : VMA 1 | VMA 2 | | VMA 3 : |
2019 * +---------------+----------------+ +----------------+
2021 * +--------------------------------------------+
2024 struct vm_area_struct *vma;
2026 vma = find_vma_intersection(current->mm, hva, reg_end);
2030 if (kvm_has_mte(kvm) && !kvm_vma_mte_allowed(vma)) {
2035 if (vma->vm_flags & VM_PFNMAP) {
2036 /* IO region dirty page logging not allowed */
2037 if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
2042 hva = min(reg_end, vma->vm_end);
2043 } while (hva < reg_end);
2045 mmap_read_unlock(current->mm);
2049 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
2053 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
2057 void kvm_arch_flush_shadow_all(struct kvm *kvm)
2059 kvm_uninit_stage2_mmu(kvm);
2062 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
2063 struct kvm_memory_slot *slot)
2065 gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
2066 phys_addr_t size = slot->npages << PAGE_SHIFT;
2068 write_lock(&kvm->mmu_lock);
2069 unmap_stage2_range(&kvm->arch.mmu, gpa, size);
2070 write_unlock(&kvm->mmu_lock);
2074 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
2077 * - S/W ops are local to a CPU (not broadcast)
2078 * - We have line migration behind our back (speculation)
2079 * - System caches don't support S/W at all (damn!)
2081 * In the face of the above, the best we can do is to try and convert
2082 * S/W ops to VA ops. Because the guest is not allowed to infer the
2083 * S/W to PA mapping, it can only use S/W to nuke the whole cache,
2084 * which is a rather good thing for us.
2086 * Also, it is only used when turning caches on/off ("The expected
2087 * usage of the cache maintenance instructions that operate by set/way
2088 * is associated with the cache maintenance instructions associated
2089 * with the powerdown and powerup of caches, if this is required by
2090 * the implementation.").
2092 * We use the following policy:
2094 * - If we trap a S/W operation, we enable VM trapping to detect
2095 * caches being turned on/off, and do a full clean.
2097 * - We flush the caches on both caches being turned on and off.
2099 * - Once the caches are enabled, we stop trapping VM ops.
2101 void kvm_set_way_flush(struct kvm_vcpu *vcpu)
2103 unsigned long hcr = *vcpu_hcr(vcpu);
2106 * If this is the first time we do a S/W operation
2107 * (i.e. HCR_TVM not set) flush the whole memory, and set the
2110 * Otherwise, rely on the VM trapping to wait for the MMU +
2111 * Caches to be turned off. At that point, we'll be able to
2112 * clean the caches again.
2114 if (!(hcr & HCR_TVM)) {
2115 trace_kvm_set_way_flush(*vcpu_pc(vcpu),
2116 vcpu_has_cache_enabled(vcpu));
2117 stage2_flush_vm(vcpu->kvm);
2118 *vcpu_hcr(vcpu) = hcr | HCR_TVM;
2122 void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
2124 bool now_enabled = vcpu_has_cache_enabled(vcpu);
2127 * If switching the MMU+caches on, need to invalidate the caches.
2128 * If switching it off, need to clean the caches.
2129 * Clean + invalidate does the trick always.
2131 if (now_enabled != was_enabled)
2132 stage2_flush_vm(vcpu->kvm);
2134 /* Caches are now on, stop trapping VM ops (until a S/W op) */
2136 *vcpu_hcr(vcpu) &= ~HCR_TVM;
2138 trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);