2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 #include <linux/mman.h>
20 #include <linux/kvm_host.h>
22 #include <linux/hugetlb.h>
23 #include <trace/events/kvm.h>
24 #include <asm/pgalloc.h>
25 #include <asm/cacheflush.h>
26 #include <asm/kvm_arm.h>
27 #include <asm/kvm_mmu.h>
28 #include <asm/kvm_mmio.h>
29 #include <asm/kvm_asm.h>
30 #include <asm/kvm_emulate.h>
35 static pgd_t *boot_hyp_pgd;
36 static pgd_t *hyp_pgd;
37 static pgd_t *merged_hyp_pgd;
38 static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
40 static unsigned long hyp_idmap_start;
41 static unsigned long hyp_idmap_end;
42 static phys_addr_t hyp_idmap_vector;
44 #define S2_PGD_SIZE (PTRS_PER_S2_PGD * sizeof(pgd_t))
45 #define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
47 #define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0)
48 #define KVM_S2_FLAG_LOGGING_ACTIVE (1UL << 1)
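/**
 * memslot_is_logging - return whether a memslot has dirty logging enabled
 * @memslot: memslot to check
 *
 * Dirty logging is considered active when the slot has a dirty bitmap
 * allocated and is not read-only.
 */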
50 static bool memslot_is_logging(struct kvm_memory_slot *memslot)
52 return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
56 * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
57 * @kvm: pointer to kvm structure.
59 * Interface to HYP function to flush all VM TLB entries
61 void kvm_flush_remote_tlbs(struct kvm *kvm)
63 kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
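/*
 * Invalidate the stage-2 TLB entries covering a single IPA for this VM,
 * by calling into the corresponding HYP helper.
 */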
66 static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
68 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
72 * D-Cache management functions. They take the page table entries by
73 * value, as they are flushing the cache using the kernel mapping (or kmap on 32bit).
76 static void kvm_flush_dcache_pte(pte_t pte)
78 __kvm_flush_dcache_pte(pte);
81 static void kvm_flush_dcache_pmd(pmd_t pmd)
83 __kvm_flush_dcache_pmd(pmd);
86 static void kvm_flush_dcache_pud(pud_t pud)
88 __kvm_flush_dcache_pud(pud);
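/*
 * A pfn with no struct page behind it (!pfn_valid) is treated as a
 * device mapping rather than normal RAM.
 */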
91 static bool kvm_is_device_pfn(unsigned long pfn)
93 return !pfn_valid(pfn);
97 * stage2_dissolve_pmd() - clear and flush huge PMD entry
98 * @kvm: pointer to kvm structure.
100 * @pmd: pmd pointer for IPA
102 * Function clears a PMD entry and flushes the 1st and 2nd stage TLBs for
103 * addr. All pages in the range are marked dirty.
105 static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
107 if (!pmd_thp_or_huge(*pmd))
111 kvm_tlb_flush_vmid_ipa(kvm, addr);
112 put_page(virt_to_page(pmd));
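/*
 * Pre-fill the memory cache with free pages so that later stage-2
 * page-table allocations, done while holding mmu_lock, can be served
 * from the cache without sleeping.
 */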
115 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
120 BUG_ON(max > KVM_NR_MEM_OBJS);
121 if (cache->nobjs >= min)
123 while (cache->nobjs < max) {
124 page = (void *)__get_free_page(PGALLOC_GFP);
127 cache->objects[cache->nobjs++] = page;
132 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
135 free_page((unsigned long)mc->objects[--mc->nobjs]);
138 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
142 BUG_ON(!mc || !mc->nobjs);
143 p = mc->objects[--mc->nobjs];
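/*
 * The clear_stage2_*_entry() helpers unhook a lower-level table from its
 * parent entry, invalidate the TLB for the covered IPA, free the
 * now-unused table and drop the reference held on the parent table page.
 */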
147 static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
149 pud_t *pud_table __maybe_unused = stage2_pud_offset(pgd, 0UL);
150 stage2_pgd_clear(pgd);
151 kvm_tlb_flush_vmid_ipa(kvm, addr);
152 stage2_pud_free(pud_table);
153 put_page(virt_to_page(pgd));
156 static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
158 pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(pud, 0);
159 VM_BUG_ON(stage2_pud_huge(*pud));
160 stage2_pud_clear(pud);
161 kvm_tlb_flush_vmid_ipa(kvm, addr);
162 stage2_pmd_free(pmd_table);
163 put_page(virt_to_page(pud));
166 static void clear_stage2_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
168 pte_t *pte_table = pte_offset_kernel(pmd, 0);
169 VM_BUG_ON(pmd_thp_or_huge(*pmd));
171 kvm_tlb_flush_vmid_ipa(kvm, addr);
172 pte_free_kernel(NULL, pte_table);
173 put_page(virt_to_page(pmd));
177 * Unmapping vs dcache management:
179 * If a guest maps certain memory pages as uncached, all writes will
180 * bypass the data cache and go directly to RAM. However, the CPUs
181 * can still speculate reads (not writes) and fill cache lines with data.
184 * Those cache lines will be *clean* cache lines though, so a
185 * clean+invalidate operation is equivalent to an invalidate
186 * operation, because no cache lines are marked dirty.
188 * Those clean cache lines could be filled prior to an uncached write
189 * by the guest, and the cache coherent IO subsystem would therefore
190 * end up writing old data to disk.
192 * This is why right after unmapping a page/section and invalidating
193 * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
194 * the IO subsystem will never hit in the cache.
196 static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
197 phys_addr_t addr, phys_addr_t end)
199 phys_addr_t start_addr = addr;
200 pte_t *pte, *start_pte;
202 start_pte = pte = pte_offset_kernel(pmd, addr);
204 if (!pte_none(*pte)) {
205 pte_t old_pte = *pte;
207 kvm_set_pte(pte, __pte(0));
208 kvm_tlb_flush_vmid_ipa(kvm, addr);
210 /* No need to invalidate the cache for device mappings */
211 if (!kvm_is_device_pfn(pte_pfn(old_pte)))
212 kvm_flush_dcache_pte(old_pte);
214 put_page(virt_to_page(pte));
216 } while (pte++, addr += PAGE_SIZE, addr != end);
218 if (stage2_pte_table_empty(start_pte))
219 clear_stage2_pmd_entry(kvm, pmd, start_addr);
222 static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
223 phys_addr_t addr, phys_addr_t end)
225 phys_addr_t next, start_addr = addr;
226 pmd_t *pmd, *start_pmd;
228 start_pmd = pmd = stage2_pmd_offset(pud, addr);
230 next = stage2_pmd_addr_end(addr, end);
231 if (!pmd_none(*pmd)) {
232 if (pmd_thp_or_huge(*pmd)) {
233 pmd_t old_pmd = *pmd;
236 kvm_tlb_flush_vmid_ipa(kvm, addr);
238 kvm_flush_dcache_pmd(old_pmd);
240 put_page(virt_to_page(pmd));
242 unmap_stage2_ptes(kvm, pmd, addr, next);
245 } while (pmd++, addr = next, addr != end);
247 if (stage2_pmd_table_empty(start_pmd))
248 clear_stage2_pud_entry(kvm, pud, start_addr);
251 static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
252 phys_addr_t addr, phys_addr_t end)
254 phys_addr_t next, start_addr = addr;
255 pud_t *pud, *start_pud;
257 start_pud = pud = stage2_pud_offset(pgd, addr);
259 next = stage2_pud_addr_end(addr, end);
260 if (!stage2_pud_none(*pud)) {
261 if (stage2_pud_huge(*pud)) {
262 pud_t old_pud = *pud;
264 stage2_pud_clear(pud);
265 kvm_tlb_flush_vmid_ipa(kvm, addr);
266 kvm_flush_dcache_pud(old_pud);
267 put_page(virt_to_page(pud));
269 unmap_stage2_pmds(kvm, pud, addr, next);
272 } while (pud++, addr = next, addr != end);
274 if (stage2_pud_table_empty(start_pud))
275 clear_stage2_pgd_entry(kvm, pgd, start_addr);
279 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
280 * @kvm: The VM pointer
281 * @start: The intermediate physical base address of the range to unmap
282 * @size: The size of the area to unmap
284 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
285 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
286 * destroying the VM), otherwise another faulting VCPU may come in and mess
287 * with things behind our backs.
289 static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
292 phys_addr_t addr = start, end = start + size;
295 assert_spin_locked(&kvm->mmu_lock);
296 pgd = kvm->arch.pgd + stage2_pgd_index(addr);
298 next = stage2_pgd_addr_end(addr, end);
299 if (!stage2_pgd_none(*pgd))
300 unmap_stage2_puds(kvm, pgd, addr, next);
301 } while (pgd++, addr = next, addr != end);
304 static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
305 phys_addr_t addr, phys_addr_t end)
309 pte = pte_offset_kernel(pmd, addr);
311 if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
312 kvm_flush_dcache_pte(*pte);
313 } while (pte++, addr += PAGE_SIZE, addr != end);
316 static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
317 phys_addr_t addr, phys_addr_t end)
322 pmd = stage2_pmd_offset(pud, addr);
324 next = stage2_pmd_addr_end(addr, end);
325 if (!pmd_none(*pmd)) {
326 if (pmd_thp_or_huge(*pmd))
327 kvm_flush_dcache_pmd(*pmd);
329 stage2_flush_ptes(kvm, pmd, addr, next);
331 } while (pmd++, addr = next, addr != end);
334 static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
335 phys_addr_t addr, phys_addr_t end)
340 pud = stage2_pud_offset(pgd, addr);
342 next = stage2_pud_addr_end(addr, end);
343 if (!stage2_pud_none(*pud)) {
344 if (stage2_pud_huge(*pud))
345 kvm_flush_dcache_pud(*pud);
347 stage2_flush_pmds(kvm, pud, addr, next);
349 } while (pud++, addr = next, addr != end);
352 static void stage2_flush_memslot(struct kvm *kvm,
353 struct kvm_memory_slot *memslot)
355 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
356 phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
360 pgd = kvm->arch.pgd + stage2_pgd_index(addr);
362 next = stage2_pgd_addr_end(addr, end);
363 if (!stage2_pgd_none(*pgd))
364 stage2_flush_puds(kvm, pgd, addr, next);
365 } while (pgd++, addr = next, addr != end);
369 * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
370 * @kvm: The struct kvm pointer
372 * Go through the stage 2 page tables and invalidate any cache lines
373 * backing memory already mapped to the VM.
375 static void stage2_flush_vm(struct kvm *kvm)
377 struct kvm_memslots *slots;
378 struct kvm_memory_slot *memslot;
381 idx = srcu_read_lock(&kvm->srcu);
382 spin_lock(&kvm->mmu_lock);
384 slots = kvm_memslots(kvm);
385 kvm_for_each_memslot(memslot, slots)
386 stage2_flush_memslot(kvm, memslot);
388 spin_unlock(&kvm->mmu_lock);
389 srcu_read_unlock(&kvm->srcu, idx);
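/*
 * Hyp counterparts of the stage-2 clear_*_entry() helpers. No TLB
 * maintenance is needed here: Hyp mappings are only torn down at Hyp
 * teardown (see unmap_hyp_range()).
 */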
392 static void clear_hyp_pgd_entry(pgd_t *pgd)
394 pud_t *pud_table __maybe_unused = pud_offset(pgd, 0UL);
396 pud_free(NULL, pud_table);
397 put_page(virt_to_page(pgd));
400 static void clear_hyp_pud_entry(pud_t *pud)
402 pmd_t *pmd_table __maybe_unused = pmd_offset(pud, 0);
403 VM_BUG_ON(pud_huge(*pud));
405 pmd_free(NULL, pmd_table);
406 put_page(virt_to_page(pud));
409 static void clear_hyp_pmd_entry(pmd_t *pmd)
411 pte_t *pte_table = pte_offset_kernel(pmd, 0);
412 VM_BUG_ON(pmd_thp_or_huge(*pmd));
414 pte_free_kernel(NULL, pte_table);
415 put_page(virt_to_page(pmd));
418 static void unmap_hyp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
420 pte_t *pte, *start_pte;
422 start_pte = pte = pte_offset_kernel(pmd, addr);
424 if (!pte_none(*pte)) {
425 kvm_set_pte(pte, __pte(0));
426 put_page(virt_to_page(pte));
428 } while (pte++, addr += PAGE_SIZE, addr != end);
430 if (hyp_pte_table_empty(start_pte))
431 clear_hyp_pmd_entry(pmd);
434 static void unmap_hyp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
437 pmd_t *pmd, *start_pmd;
439 start_pmd = pmd = pmd_offset(pud, addr);
441 next = pmd_addr_end(addr, end);
442 /* Hyp doesn't use huge pmds */
444 unmap_hyp_ptes(pmd, addr, next);
445 } while (pmd++, addr = next, addr != end);
447 if (hyp_pmd_table_empty(start_pmd))
448 clear_hyp_pud_entry(pud);
451 static void unmap_hyp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
454 pud_t *pud, *start_pud;
456 start_pud = pud = pud_offset(pgd, addr);
458 next = pud_addr_end(addr, end);
459 /* Hyp doesn't use huge puds */
461 unmap_hyp_pmds(pud, addr, next);
462 } while (pud++, addr = next, addr != end);
464 if (hyp_pud_table_empty(start_pud))
465 clear_hyp_pgd_entry(pgd);
468 static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
471 phys_addr_t addr = start, end = start + size;
475 * We don't unmap anything from HYP, except at the hyp tear down.
476 * Hence, we don't have to invalidate the TLBs here.
478 pgd = pgdp + pgd_index(addr);
480 next = pgd_addr_end(addr, end);
482 unmap_hyp_puds(pgd, addr, next);
483 } while (pgd++, addr = next, addr != end);
487 * free_hyp_pgds - free Hyp-mode page tables
489 * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
490 * therefore contains either mappings in the kernel memory area (above
491 * PAGE_OFFSET), or device mappings in the vmalloc range (from
492 * VMALLOC_START to VMALLOC_END).
494 * boot_hyp_pgd should only map two pages for the init code.
496 void free_hyp_pgds(void)
500 mutex_lock(&kvm_hyp_pgd_mutex);
503 unmap_hyp_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
504 free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
509 unmap_hyp_range(hyp_pgd, hyp_idmap_start, PAGE_SIZE);
510 for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
511 unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
512 for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
513 unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
515 free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
518 if (merged_hyp_pgd) {
519 clear_page(merged_hyp_pgd);
520 free_page((unsigned long)merged_hyp_pgd);
521 merged_hyp_pgd = NULL;
524 mutex_unlock(&kvm_hyp_pgd_mutex);
527 static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
528 unsigned long end, unsigned long pfn,
536 pte = pte_offset_kernel(pmd, addr);
537 kvm_set_pte(pte, pfn_pte(pfn, prot));
538 get_page(virt_to_page(pte));
539 kvm_flush_dcache_to_poc(pte, sizeof(*pte));
541 } while (addr += PAGE_SIZE, addr != end);
544 static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
545 unsigned long end, unsigned long pfn,
550 unsigned long addr, next;
554 pmd = pmd_offset(pud, addr);
556 BUG_ON(pmd_sect(*pmd));
558 if (pmd_none(*pmd)) {
559 pte = pte_alloc_one_kernel(NULL, addr);
561 kvm_err("Cannot allocate Hyp pte\n");
564 pmd_populate_kernel(NULL, pmd, pte);
565 get_page(virt_to_page(pmd));
566 kvm_flush_dcache_to_poc(pmd, sizeof(*pmd));
569 next = pmd_addr_end(addr, end);
571 create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
572 pfn += (next - addr) >> PAGE_SHIFT;
573 } while (addr = next, addr != end);
578 static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
579 unsigned long end, unsigned long pfn,
584 unsigned long addr, next;
589 pud = pud_offset(pgd, addr);
591 if (pud_none_or_clear_bad(pud)) {
592 pmd = pmd_alloc_one(NULL, addr);
594 kvm_err("Cannot allocate Hyp pmd\n");
597 pud_populate(NULL, pud, pmd);
598 get_page(virt_to_page(pud));
599 kvm_flush_dcache_to_poc(pud, sizeof(*pud));
602 next = pud_addr_end(addr, end);
603 ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
606 pfn += (next - addr) >> PAGE_SHIFT;
607 } while (addr = next, addr != end);
612 static int __create_hyp_mappings(pgd_t *pgdp,
613 unsigned long start, unsigned long end,
614 unsigned long pfn, pgprot_t prot)
618 unsigned long addr, next;
621 mutex_lock(&kvm_hyp_pgd_mutex);
622 addr = start & PAGE_MASK;
623 end = PAGE_ALIGN(end);
625 pgd = pgdp + pgd_index(addr);
627 if (pgd_none(*pgd)) {
628 pud = pud_alloc_one(NULL, addr);
630 kvm_err("Cannot allocate Hyp pud\n");
634 pgd_populate(NULL, pgd, pud);
635 get_page(virt_to_page(pgd));
636 kvm_flush_dcache_to_poc(pgd, sizeof(*pgd));
639 next = pgd_addr_end(addr, end);
640 err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
643 pfn += (next - addr) >> PAGE_SHIFT;
644 } while (addr = next, addr != end);
646 mutex_unlock(&kvm_hyp_pgd_mutex);
650 static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
652 if (!is_vmalloc_addr(kaddr)) {
653 BUG_ON(!virt_addr_valid(kaddr));
656 return page_to_phys(vmalloc_to_page(kaddr)) +
657 offset_in_page(kaddr);
662 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
663 * @from: The virtual kernel start address of the range
664 * @to: The virtual kernel end address of the range (exclusive)
665 * @prot: The protection to be applied to this range
667 * The same virtual address as the kernel virtual address is also used
668 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying physical pages.
671 int create_hyp_mappings(void *from, void *to, pgprot_t prot)
673 phys_addr_t phys_addr;
674 unsigned long virt_addr;
675 unsigned long start = kern_hyp_va((unsigned long)from);
676 unsigned long end = kern_hyp_va((unsigned long)to);
678 if (is_kernel_in_hyp_mode())
681 start = start & PAGE_MASK;
682 end = PAGE_ALIGN(end);
684 for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
687 phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
688 err = __create_hyp_mappings(hyp_pgd, virt_addr,
689 virt_addr + PAGE_SIZE,
690 __phys_to_pfn(phys_addr),
700 * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
701 * @from: The kernel start VA of the range
702 * @to: The kernel end VA of the range (exclusive)
703 * @phys_addr: The physical start address which gets mapped
705 * The resulting HYP VA is the same as the kernel VA, modulo HYP_PAGE_OFFSET.
708 int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
710 unsigned long start = kern_hyp_va((unsigned long)from);
711 unsigned long end = kern_hyp_va((unsigned long)to);
713 if (is_kernel_in_hyp_mode())
716 /* Check for a valid kernel IO mapping */
717 if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
720 return __create_hyp_mappings(hyp_pgd, start, end,
721 __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
725 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
726 * @kvm: The KVM struct pointer for the VM.
728 * Allocates only the stage-2 HW PGD level table(s) (can support either full
729 * 40-bit input addresses or limited to 32-bit input addresses). Clears the allocated pages.
732 * Note we don't need locking here as this is only called when the VM is
733 * created, which can only be done once.
735 int kvm_alloc_stage2_pgd(struct kvm *kvm)
739 if (kvm->arch.pgd != NULL) {
740 kvm_err("kvm_arch already initialized?\n");
744 /* Allocate the HW PGD, making sure that each page gets its own refcount */
745 pgd = alloc_pages_exact(S2_PGD_SIZE, GFP_KERNEL | __GFP_ZERO);
753 static void stage2_unmap_memslot(struct kvm *kvm,
754 struct kvm_memory_slot *memslot)
756 hva_t hva = memslot->userspace_addr;
757 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
758 phys_addr_t size = PAGE_SIZE * memslot->npages;
759 hva_t reg_end = hva + size;
762 * A memory region could potentially cover multiple VMAs, and any holes
763 * between them, so iterate over all of them to find out if we should unmap any of them.
766 * +--------------------------------------------+
767 * +---------------+----------------+ +----------------+
768 * | : VMA 1 | VMA 2 | | VMA 3 : |
769 * +---------------+----------------+ +----------------+
771 * +--------------------------------------------+
774 struct vm_area_struct *vma = find_vma(current->mm, hva);
775 hva_t vm_start, vm_end;
777 if (!vma || vma->vm_start >= reg_end)
781 * Take the intersection of this VMA with the memory region
783 vm_start = max(hva, vma->vm_start);
784 vm_end = min(reg_end, vma->vm_end);
786 if (!(vma->vm_flags & VM_PFNMAP)) {
787 gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
788 unmap_stage2_range(kvm, gpa, vm_end - vm_start);
791 } while (hva < reg_end);
795 * stage2_unmap_vm - Unmap Stage-2 RAM mappings
796 * @kvm: The struct kvm pointer
798 * Go through the memregions and unmap any regular RAM
799 * backing memory already mapped to the VM.
801 void stage2_unmap_vm(struct kvm *kvm)
803 struct kvm_memslots *slots;
804 struct kvm_memory_slot *memslot;
807 idx = srcu_read_lock(&kvm->srcu);
808 down_read(&current->mm->mmap_sem);
809 spin_lock(&kvm->mmu_lock);
811 slots = kvm_memslots(kvm);
812 kvm_for_each_memslot(memslot, slots)
813 stage2_unmap_memslot(kvm, memslot);
815 spin_unlock(&kvm->mmu_lock);
816 up_read(&current->mm->mmap_sem);
817 srcu_read_unlock(&kvm->srcu, idx);
821 * kvm_free_stage2_pgd - free all stage-2 tables
822 * @kvm: The KVM struct pointer for the VM.
824 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
825 * underlying level-2 and level-3 tables before freeing the actual level-1 table
826 * and setting the struct pointer to NULL.
828 void kvm_free_stage2_pgd(struct kvm *kvm)
832 spin_lock(&kvm->mmu_lock);
834 unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
835 pgd = READ_ONCE(kvm->arch.pgd);
836 kvm->arch.pgd = NULL;
838 spin_unlock(&kvm->mmu_lock);
840 /* Free the HW pgd, one page at a time */
842 free_pages_exact(pgd, S2_PGD_SIZE);
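/*
 * stage2_get_pud()/stage2_get_pmd() walk the stage-2 tables down to the
 * PUD/PMD level for @addr, allocating missing intermediate tables from
 * @cache when one is provided.
 */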
845 static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
851 pgd = kvm->arch.pgd + stage2_pgd_index(addr);
852 if (WARN_ON(stage2_pgd_none(*pgd))) {
855 pud = mmu_memory_cache_alloc(cache);
856 stage2_pgd_populate(pgd, pud);
857 get_page(virt_to_page(pgd));
860 return stage2_pud_offset(pgd, addr);
863 static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
869 pud = stage2_get_pud(kvm, cache, addr);
873 if (stage2_pud_none(*pud)) {
876 pmd = mmu_memory_cache_alloc(cache);
877 stage2_pud_populate(pud, pmd);
878 get_page(virt_to_page(pud));
881 return stage2_pmd_offset(pud, addr);
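/*
 * Install a huge (PMD-level) stage-2 mapping at @addr. An existing entry
 * pointing elsewhere is replaced using break-before-make: clear the PMD,
 * invalidate the TLB, then write the new value.
 */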
884 static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
885 *cache, phys_addr_t addr, const pmd_t *new_pmd)
889 pmd = stage2_get_pmd(kvm, cache, addr);
893 if (pmd_present(old_pmd)) {
895 * Multiple vcpus faulting on the same PMD entry can
896 * lead to them sequentially updating the PMD with the
897 * same value. Following the break-before-make
898 * (pmd_clear() followed by tlb_flush()) process can
899 * hinder forward progress due to refaults generated
900 * on missing translations.
902 * Skip updating the page table if the entry is unchanged.
905 if (pmd_val(old_pmd) == pmd_val(*new_pmd))
909 * Mapping in huge pages should only happen through a
910 * fault. If a page is merged into a transparent huge
911 * page, the individual subpages of that huge page
912 * should be unmapped through MMU notifiers before we get here.
915 * Merging of CompoundPages is not supported; they
916 * should be split first, unmapped, merged,
917 * and mapped back in on demand.
919 VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
922 kvm_tlb_flush_vmid_ipa(kvm, addr);
924 get_page(virt_to_page(pmd));
927 kvm_set_pmd(pmd, *new_pmd);
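/*
 * Install a PAGE_SIZE stage-2 mapping at @addr, allocating a PTE table
 * from @cache if necessary. @flags selects IOMAP and dirty-logging
 * behaviour (KVM_S2PTE_FLAG_IS_IOMAP / KVM_S2_FLAG_LOGGING_ACTIVE).
 */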
931 static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
932 phys_addr_t addr, const pte_t *new_pte,
937 bool iomap = flags & KVM_S2PTE_FLAG_IS_IOMAP;
938 bool logging_active = flags & KVM_S2_FLAG_LOGGING_ACTIVE;
940 VM_BUG_ON(logging_active && !cache);
942 /* Create stage-2 page table mapping - Levels 0 and 1 */
943 pmd = stage2_get_pmd(kvm, cache, addr);
946 * Ignore calls from kvm_set_spte_hva for unallocated address ranges.
953 * While dirty page logging is active, dissolve a huge PMD, then continue on to allocate a regular page mapping.
957 stage2_dissolve_pmd(kvm, addr, pmd);
959 /* Create stage-2 page mappings - Level 2 */
960 if (pmd_none(*pmd)) {
962 return 0; /* ignore calls from kvm_set_spte_hva */
963 pte = mmu_memory_cache_alloc(cache);
964 pmd_populate_kernel(NULL, pmd, pte);
965 get_page(virt_to_page(pmd));
968 pte = pte_offset_kernel(pmd, addr);
970 if (iomap && pte_present(*pte))
973 /* Create 2nd stage page table mapping - Level 3 */
975 if (pte_present(old_pte)) {
976 /* Skip page table update if there is no change */
977 if (pte_val(old_pte) == pte_val(*new_pte))
980 kvm_set_pte(pte, __pte(0));
981 kvm_tlb_flush_vmid_ipa(kvm, addr);
983 get_page(virt_to_page(pte));
986 kvm_set_pte(pte, *new_pte);
990 #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
991 static int stage2_ptep_test_and_clear_young(pte_t *pte)
993 if (pte_young(*pte)) {
994 *pte = pte_mkold(*pte);
1000 static int stage2_ptep_test_and_clear_young(pte_t *pte)
1002 return __ptep_test_and_clear_young(pte);
1006 static int stage2_pmdp_test_and_clear_young(pmd_t *pmd)
1008 return stage2_ptep_test_and_clear_young((pte_t *)pmd);
1012 * kvm_phys_addr_ioremap - map a device range to guest IPA
1014 * @kvm: The KVM pointer
1015 * @guest_ipa: The IPA at which to insert the mapping
1016 * @pa: The physical address of the device
1017 * @size: The size of the mapping
1019 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
1020 phys_addr_t pa, unsigned long size, bool writable)
1022 phys_addr_t addr, end;
1025 struct kvm_mmu_memory_cache cache = { 0, };
1027 end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
1028 pfn = __phys_to_pfn(pa);
1030 for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
1031 pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
1034 pte = kvm_s2pte_mkwrite(pte);
1036 ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES,
1040 spin_lock(&kvm->mmu_lock);
1041 ret = stage2_set_pte(kvm, &cache, addr, &pte,
1042 KVM_S2PTE_FLAG_IS_IOMAP);
1043 spin_unlock(&kvm->mmu_lock);
1051 mmu_free_memory_cache(&cache);
1055 static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
1057 kvm_pfn_t pfn = *pfnp;
1058 gfn_t gfn = *ipap >> PAGE_SHIFT;
1060 if (PageTransCompoundMap(pfn_to_page(pfn))) {
1063 * The address we faulted on is backed by a transparent huge
1064 * page. However, because we map the compound huge page and
1065 * not the individual tail page, we need to transfer the
1066 * refcount to the head page. We have to be careful that the
1067 * THP doesn't start to split while we are adjusting the refcount.
1070 * We are sure this doesn't happen, because mmu_notifier_retry
1071 * was successful and we are holding the mmu_lock, so if this
1072 * THP is trying to split, it will be blocked in the mmu
1073 * notifier before touching any of the pages, specifically
1074 * before being able to call __split_huge_page_refcount().
1076 * We can therefore safely transfer the refcount from PG_tail
1077 * to PG_head and switch the pfn from a tail page to the head page accordingly.
1080 mask = PTRS_PER_PMD - 1;
1081 VM_BUG_ON((gfn & mask) != (pfn & mask));
1084 kvm_release_pfn_clean(pfn);
1096 static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
1098 if (kvm_vcpu_trap_is_iabt(vcpu))
1101 return kvm_vcpu_dabt_iswrite(vcpu);
1105 * stage2_wp_ptes - write protect PMD range
1106 * @pmd: pointer to pmd entry
1107 * @addr: range start address
1108 * @end: range end address
1110 static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
1114 pte = pte_offset_kernel(pmd, addr);
1116 if (!pte_none(*pte)) {
1117 if (!kvm_s2pte_readonly(pte))
1118 kvm_set_s2pte_readonly(pte);
1120 } while (pte++, addr += PAGE_SIZE, addr != end);
1124 * stage2_wp_pmds - write protect PUD range
1125 * @pud: pointer to pud entry
1126 * @addr: range start address
1127 * @end: range end address
1129 static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
1134 pmd = stage2_pmd_offset(pud, addr);
1137 next = stage2_pmd_addr_end(addr, end);
1138 if (!pmd_none(*pmd)) {
1139 if (pmd_thp_or_huge(*pmd)) {
1140 if (!kvm_s2pmd_readonly(pmd))
1141 kvm_set_s2pmd_readonly(pmd);
1143 stage2_wp_ptes(pmd, addr, next);
1146 } while (pmd++, addr = next, addr != end);
1150 * stage2_wp_puds - write protect PGD range
1151 * @pgd: pointer to pgd entry
1152 * @addr: range start address
1153 * @end: range end address
1155 * Process PUD entries; huge PUDs are not supported, so hitting one is a BUG.
1157 static void stage2_wp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
1162 pud = stage2_pud_offset(pgd, addr);
1164 next = stage2_pud_addr_end(addr, end);
1165 if (!stage2_pud_none(*pud)) {
1166 /* TODO: PUD not supported, revisit later if supported */
1167 BUG_ON(stage2_pud_huge(*pud));
1168 stage2_wp_pmds(pud, addr, next);
1170 } while (pud++, addr = next, addr != end);
1174 * stage2_wp_range() - write protect stage2 memory region range
1175 * @kvm: The KVM pointer
1176 * @addr: Start address of range
1177 * @end: End address of range
1179 static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
1184 pgd = kvm->arch.pgd + stage2_pgd_index(addr);
1187 * Release kvm_mmu_lock periodically if the memory region is
1188 * large. Otherwise, we may see kernel panics with
1189 * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
1190 * CONFIG_LOCKDEP. Additionally, holding the lock too long
1191 * will also starve other vCPUs.
1193 if (need_resched() || spin_needbreak(&kvm->mmu_lock))
1194 cond_resched_lock(&kvm->mmu_lock);
1196 next = stage2_pgd_addr_end(addr, end);
1197 if (stage2_pgd_present(*pgd))
1198 stage2_wp_puds(pgd, addr, next);
1199 } while (pgd++, addr = next, addr != end);
1203 * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
1204 * @kvm: The KVM pointer
1205 * @slot: The memory slot to write protect
1207 * Called to start logging dirty pages after the KVM_MEM_LOG_DIRTY_PAGES
1208 * flag has been set on a memory region. After this function returns,
1209 * all present PMDs and PTEs in the memory region are write protected.
1210 * Afterwards the dirty page log can be read.
1212 * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
1213 * serializing operations for VM memory regions.
1215 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
1217 struct kvm_memslots *slots = kvm_memslots(kvm);
1218 struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
1219 phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
1220 phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
1222 spin_lock(&kvm->mmu_lock);
1223 stage2_wp_range(kvm, start, end);
1224 spin_unlock(&kvm->mmu_lock);
1225 kvm_flush_remote_tlbs(kvm);
1229 * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
1230 * @kvm: The KVM pointer
1231 * @slot: The memory slot associated with mask
1232 * @gfn_offset: The gfn offset in memory slot
1233 * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory
1234 * slot to be write protected
1236 * Walks the bits set in mask and write protects the associated PTEs.
1237 * The caller must hold kvm_mmu_lock.
1239 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1240 struct kvm_memory_slot *slot,
1241 gfn_t gfn_offset, unsigned long mask)
1243 phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
1244 phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
1245 phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
1247 stage2_wp_range(kvm, start, end);
1251 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected dirty pages.
1254 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1255 * enable dirty logging for them.
1257 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1258 struct kvm_memory_slot *slot,
1259 gfn_t gfn_offset, unsigned long mask)
1261 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
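/*
 * Thin wrapper around the architecture helper that performs the cache
 * maintenance required before a page is handed to the guest.
 */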
1264 static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
1265 unsigned long size, bool uncached)
1267 __coherent_cache_guest_page(vcpu, pfn, size, uncached);
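/*
 * Handle a stage-2 fault on memory backed by a memslot: pin the page with
 * gfn_to_pfn_prot(), decide between a regular PTE and a huge PMD mapping,
 * perform the necessary cache maintenance and install the mapping.
 */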
1270 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1271 struct kvm_memory_slot *memslot, unsigned long hva,
1272 unsigned long fault_status)
1275 bool write_fault, writable, hugetlb = false, force_pte = false;
1276 unsigned long mmu_seq;
1277 gfn_t gfn = fault_ipa >> PAGE_SHIFT;
1278 struct kvm *kvm = vcpu->kvm;
1279 struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
1280 struct vm_area_struct *vma;
1282 pgprot_t mem_type = PAGE_S2;
1283 bool fault_ipa_uncached;
1284 bool logging_active = memslot_is_logging(memslot);
1285 unsigned long flags = 0;
1287 write_fault = kvm_is_write_fault(vcpu);
1288 if (fault_status == FSC_PERM && !write_fault) {
1289 kvm_err("Unexpected L2 read permission error\n");
1293 /* Let's check if we will get back a huge page backed by hugetlbfs */
1294 down_read(&current->mm->mmap_sem);
1295 vma = find_vma_intersection(current->mm, hva, hva + 1);
1296 if (unlikely(!vma)) {
1297 kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
1298 up_read(&current->mm->mmap_sem);
1302 if (vma_kernel_pagesize(vma) == PMD_SIZE && !logging_active) {
1304 gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
1307 * Pages belonging to memslots that don't have the same
1308 * alignment for userspace and IPA cannot be mapped using
1309 * block descriptors even if the pages belong to a THP for
1310 * the process, because the stage-2 block descriptor will
1311 * cover more than a single THP and we lose atomicity for
1312 * unmapping, updates, and splits of the THP or other pages
1313 * in the stage-2 block range.
1315 if ((memslot->userspace_addr & ~PMD_MASK) !=
1316 ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK))
1319 up_read(&current->mm->mmap_sem);
1321 /* We need minimum second+third level pages */
1322 ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
1327 mmu_seq = vcpu->kvm->mmu_notifier_seq;
1329 * Ensure the read of mmu_notifier_seq happens before we call
1330 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
1331 * the page we just got a reference to gets unmapped before we have a
1332 * chance to grab the mmu_lock, which ensures that if the page gets
1333 * unmapped afterwards, the call to kvm_unmap_hva will take it away
1334 * from us again properly. This smp_rmb() interacts with the smp_wmb()
1335 * in kvm_mmu_notifier_invalidate_<page|range_end>.
1339 pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
1340 if (is_error_noslot_pfn(pfn))
1343 if (kvm_is_device_pfn(pfn)) {
1344 mem_type = PAGE_S2_DEVICE;
1345 flags |= KVM_S2PTE_FLAG_IS_IOMAP;
1346 } else if (logging_active) {
1348 * Faults on pages in a memslot with logging enabled
1349 * should not be mapped with huge pages (it introduces churn
1350 * and performance degradation), so force a pte mapping.
1353 flags |= KVM_S2_FLAG_LOGGING_ACTIVE;
1356 * Only actually map the page as writable if this was a write fault.
1363 spin_lock(&kvm->mmu_lock);
1364 if (mmu_notifier_retry(kvm, mmu_seq))
1367 if (!hugetlb && !force_pte)
1368 hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
1370 fault_ipa_uncached = memslot->flags & KVM_MEMSLOT_INCOHERENT;
1373 pmd_t new_pmd = pfn_pmd(pfn, mem_type);
1374 new_pmd = pmd_mkhuge(new_pmd);
1376 new_pmd = kvm_s2pmd_mkwrite(new_pmd);
1377 kvm_set_pfn_dirty(pfn);
1379 coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached);
1380 ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
1382 pte_t new_pte = pfn_pte(pfn, mem_type);
1385 new_pte = kvm_s2pte_mkwrite(new_pte);
1386 kvm_set_pfn_dirty(pfn);
1387 mark_page_dirty(kvm, gfn);
1389 coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached);
1390 ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
1394 spin_unlock(&kvm->mmu_lock);
1395 kvm_set_pfn_accessed(pfn);
1396 kvm_release_pfn_clean(pfn);
1401 * Resolve the access fault by making the page young again.
1402 * Note that because the faulting entry is guaranteed not to be
1403 * cached in the TLB, we don't need to invalidate anything.
1404 * Only the HW Access Flag updates are supported for Stage 2 (no DBM),
1405 * so there is no need for atomic (pte|pmd)_mkyoung operations.
1407 static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
1412 bool pfn_valid = false;
1414 trace_kvm_access_fault(fault_ipa);
1416 spin_lock(&vcpu->kvm->mmu_lock);
1418 pmd = stage2_get_pmd(vcpu->kvm, NULL, fault_ipa);
1419 if (!pmd || pmd_none(*pmd)) /* Nothing there */
1422 if (pmd_thp_or_huge(*pmd)) { /* THP, HugeTLB */
1423 *pmd = pmd_mkyoung(*pmd);
1424 pfn = pmd_pfn(*pmd);
1429 pte = pte_offset_kernel(pmd, fault_ipa);
1430 if (pte_none(*pte)) /* Nothing there either */
1433 *pte = pte_mkyoung(*pte); /* Just a page... */
1434 pfn = pte_pfn(*pte);
1437 spin_unlock(&vcpu->kvm->mmu_lock);
1439 kvm_set_pfn_accessed(pfn);
1443 * kvm_handle_guest_abort - handles all 2nd stage aborts
1444 * @vcpu: the VCPU pointer
1445 * @run: the kvm_run structure
1447 * Any abort that gets to the host is almost guaranteed to be caused by a
1448 * missing second stage translation table entry, which can mean that either the
1449 * guest simply needs more memory and we must allocate an appropriate page, or
1450 * that the guest tried to access I/O memory, which is emulated by user
1451 * space. The distinction is based on the IPA causing the fault and whether this
1452 * memory region has been registered as standard RAM by user space.
1454 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
1456 unsigned long fault_status;
1457 phys_addr_t fault_ipa;
1458 struct kvm_memory_slot *memslot;
1460 bool is_iabt, write_fault, writable;
1464 is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
1465 if (unlikely(!is_iabt && kvm_vcpu_dabt_isextabt(vcpu))) {
1466 kvm_inject_vabt(vcpu);
1470 fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
1472 trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
1473 kvm_vcpu_get_hfar(vcpu), fault_ipa);
1475 /* Check whether the stage-2 fault is a translation, permission or access fault */
1476 fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
1477 if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
1478 fault_status != FSC_ACCESS) {
1479 kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
1480 kvm_vcpu_trap_get_class(vcpu),
1481 (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
1482 (unsigned long)kvm_vcpu_get_hsr(vcpu));
1486 idx = srcu_read_lock(&vcpu->kvm->srcu);
1488 gfn = fault_ipa >> PAGE_SHIFT;
1489 memslot = gfn_to_memslot(vcpu->kvm, gfn);
1490 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
1491 write_fault = kvm_is_write_fault(vcpu);
1492 if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
1494 /* Prefetch Abort on I/O address */
1495 kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
1501 * Check for a cache maintenance operation. Since we
1502 * ended up here, we know it is outside of any memory
1503 * slot. But we can't find out if that is for a device,
1504 * or if the guest is just being stupid. The only thing
1505 * we know for sure is that this range cannot be cached.
1507 * So let's assume that the guest is just being
1508 * cautious, and skip the instruction.
1510 if (kvm_vcpu_dabt_is_cm(vcpu)) {
1511 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
1517 * The IPA is reported as [MAX:12], so we need to
1518 * complement it with the bottom 12 bits from the
1519 * faulting VA. This is always 12 bits, irrespective of the page size we're using.
1522 fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
1523 ret = io_mem_abort(vcpu, run, fault_ipa);
1527 /* Userspace should not be able to register out-of-bounds IPAs */
1528 VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
1530 if (fault_status == FSC_ACCESS) {
1531 handle_access_fault(vcpu, fault_ipa);
1536 ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
1540 srcu_read_unlock(&vcpu->kvm->srcu, idx);
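/*
 * Walk all memslots intersecting the HVA range [start, end) and invoke
 * @handler on each guest physical page covered; the handlers' return
 * values are OR'ed together.
 */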
1544 static int handle_hva_to_gpa(struct kvm *kvm,
1545 unsigned long start,
1547 int (*handler)(struct kvm *kvm,
1548 gpa_t gpa, void *data),
1551 struct kvm_memslots *slots;
1552 struct kvm_memory_slot *memslot;
1555 slots = kvm_memslots(kvm);
1557 /* we only care about the pages that the guest sees */
1558 kvm_for_each_memslot(memslot, slots) {
1559 unsigned long hva_start, hva_end;
1562 hva_start = max(start, memslot->userspace_addr);
1563 hva_end = min(end, memslot->userspace_addr +
1564 (memslot->npages << PAGE_SHIFT));
1565 if (hva_start >= hva_end)
1569 * {gfn(page) | page intersects with [hva_start, hva_end)} =
1570 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
1572 gfn = hva_to_gfn_memslot(hva_start, memslot);
1573 gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
1575 for (; gfn < gfn_end; ++gfn) {
1576 gpa_t gpa = gfn << PAGE_SHIFT;
1577 ret |= handler(kvm, gpa, data);
1584 static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
1586 unmap_stage2_range(kvm, gpa, PAGE_SIZE);
1590 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
1592 unsigned long end = hva + PAGE_SIZE;
1597 trace_kvm_unmap_hva(hva);
1598 handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
1602 int kvm_unmap_hva_range(struct kvm *kvm,
1603 unsigned long start, unsigned long end)
1608 trace_kvm_unmap_hva_range(start, end);
1609 handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
1613 static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
1615 pte_t *pte = (pte_t *)data;
1618 * We can always call stage2_set_pte with KVM_S2PTE_FLAG_LOGGING_ACTIVE
1619 * flag clear because MMU notifiers will have unmapped a huge PMD before
1620 * calling ->change_pte() (which in turn calls kvm_set_spte_hva()) and
1621 * therefore stage2_set_pte() never needs to clear out a huge PMD
1622 * through this calling path.
1624 stage2_set_pte(kvm, NULL, gpa, pte, 0);
1629 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
1631 unsigned long end = hva + PAGE_SIZE;
1637 trace_kvm_set_spte_hva(hva);
1638 stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
1639 handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
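/*
 * Test and clear the access flag on the stage-2 entry (PMD or PTE)
 * mapping @gpa, returning whether it was young.
 */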
1642 static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
1647 pmd = stage2_get_pmd(kvm, NULL, gpa);
1648 if (!pmd || pmd_none(*pmd)) /* Nothing there */
1651 if (pmd_thp_or_huge(*pmd)) /* THP, HugeTLB */
1652 return stage2_pmdp_test_and_clear_young(pmd);
1654 pte = pte_offset_kernel(pmd, gpa);
1658 return stage2_ptep_test_and_clear_young(pte);
1661 static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
1666 pmd = stage2_get_pmd(kvm, NULL, gpa);
1667 if (!pmd || pmd_none(*pmd)) /* Nothing there */
1670 if (pmd_thp_or_huge(*pmd)) /* THP, HugeTLB */
1671 return pmd_young(*pmd);
1673 pte = pte_offset_kernel(pmd, gpa);
1674 if (!pte_none(*pte)) /* Just a page... */
1675 return pte_young(*pte);
1680 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
1684 trace_kvm_age_hva(start, end);
1685 return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
1688 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
1692 trace_kvm_test_age_hva(hva);
1693 return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
1696 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
1698 mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
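/* Physical address of the page tables to be loaded into HTTBR for Hyp mode. */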
1701 phys_addr_t kvm_mmu_get_httbr(void)
1703 if (__kvm_cpu_uses_extended_idmap())
1704 return virt_to_phys(merged_hyp_pgd);
1706 return virt_to_phys(hyp_pgd);
1709 phys_addr_t kvm_get_idmap_vector(void)
1711 return hyp_idmap_vector;
1714 phys_addr_t kvm_get_idmap_start(void)
1716 return hyp_idmap_start;
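/*
 * Map the HYP init code at its identity-mapped address in @pgd so that it
 * can keep running while the MMU is being enabled.
 */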
1719 static int kvm_map_idmap_text(pgd_t *pgd)
1723 /* Create the idmap in the boot page tables */
1724 err = __create_hyp_mappings(pgd,
1725 hyp_idmap_start, hyp_idmap_end,
1726 __phys_to_pfn(hyp_idmap_start),
1729 kvm_err("Failed to idmap %lx-%lx\n",
1730 hyp_idmap_start, hyp_idmap_end);
1735 int kvm_mmu_init(void)
1739 hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
1740 hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
1741 hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);
1744 * We rely on the linker script to ensure at build time that the HYP
1745 * init code does not cross a page boundary.
1747 BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
1749 kvm_info("IDMAP page: %lx\n", hyp_idmap_start);
1750 kvm_info("HYP VA range: %lx:%lx\n",
1751 kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL));
1753 if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
1754 hyp_idmap_start < kern_hyp_va(~0UL) &&
1755 hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
1757 * The idmap page intersects with the HYP VA space,
1758 * so it is not safe to continue further.
1760 kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
1765 hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
1767 kvm_err("Hyp mode PGD not allocated\n");
1772 if (__kvm_cpu_uses_extended_idmap()) {
1773 boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1775 if (!boot_hyp_pgd) {
1776 kvm_err("Hyp boot PGD not allocated\n");
1781 err = kvm_map_idmap_text(boot_hyp_pgd);
1785 merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1786 if (!merged_hyp_pgd) {
1787 kvm_err("Failed to allocate extra HYP pgd\n");
1790 __kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd,
1793 err = kvm_map_idmap_text(hyp_pgd);
1804 void kvm_arch_commit_memory_region(struct kvm *kvm,
1805 const struct kvm_userspace_memory_region *mem,
1806 const struct kvm_memory_slot *old,
1807 const struct kvm_memory_slot *new,
1808 enum kvm_mr_change change)
1811 * At this point memslot has been committed and there is an
1812 * allocated dirty_bitmap[]; dirty pages will be tracked while the
1813 * memory slot is write protected.
1815 if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
1816 kvm_mmu_wp_memory_region(kvm, mem->slot);
1819 int kvm_arch_prepare_memory_region(struct kvm *kvm,
1820 struct kvm_memory_slot *memslot,
1821 const struct kvm_userspace_memory_region *mem,
1822 enum kvm_mr_change change)
1824 hva_t hva = mem->userspace_addr;
1825 hva_t reg_end = hva + mem->memory_size;
1826 bool writable = !(mem->flags & KVM_MEM_READONLY);
1829 if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
1830 change != KVM_MR_FLAGS_ONLY)
1834 * Prevent userspace from creating a memory region outside of the IPA
1835 * space addressable by the KVM guest.
1837 if (memslot->base_gfn + memslot->npages >
1838 (KVM_PHYS_SIZE >> PAGE_SHIFT))
1841 down_read(&current->mm->mmap_sem);
1843 * A memory region could potentially cover multiple VMAs, and any holes
1844 * between them, so iterate over all of them to find out if we can map
1845 * any of them right now.
1847 * +--------------------------------------------+
1848 * +---------------+----------------+ +----------------+
1849 * | : VMA 1 | VMA 2 | | VMA 3 : |
1850 * +---------------+----------------+ +----------------+
1852 * +--------------------------------------------+
1855 struct vm_area_struct *vma = find_vma(current->mm, hva);
1856 hva_t vm_start, vm_end;
1858 if (!vma || vma->vm_start >= reg_end)
1862 * Mapping a read-only VMA is only allowed if the
1863 * memory region is configured as read-only.
1865 if (writable && !(vma->vm_flags & VM_WRITE)) {
1871 * Take the intersection of this VMA with the memory region
1873 vm_start = max(hva, vma->vm_start);
1874 vm_end = min(reg_end, vma->vm_end);
1876 if (vma->vm_flags & VM_PFNMAP) {
1877 gpa_t gpa = mem->guest_phys_addr +
1878 (vm_start - mem->userspace_addr);
1881 pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
1882 pa += vm_start - vma->vm_start;
1884 /* IO region dirty page logging not allowed */
1885 if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
1890 ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
1897 } while (hva < reg_end);
1899 if (change == KVM_MR_FLAGS_ONLY)
1902 spin_lock(&kvm->mmu_lock);
1904 unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
1906 stage2_flush_memslot(kvm, memslot);
1907 spin_unlock(&kvm->mmu_lock);
1909 up_read(&current->mm->mmap_sem);
1913 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
1914 struct kvm_memory_slot *dont)
1918 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
1919 unsigned long npages)
1922 * Readonly memslots are not incoherent with the caches by definition,
1923 * but in practice, they are used mostly to emulate ROMs or NOR flashes
1924 * that the guest may consider devices and hence map as uncached.
1925 * To prevent incoherency issues in these cases, tag all readonly
1926 * regions as incoherent.
1928 if (slot->flags & KVM_MEM_READONLY)
1929 slot->flags |= KVM_MEMSLOT_INCOHERENT;
1933 void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
1937 void kvm_arch_flush_shadow_all(struct kvm *kvm)
1939 kvm_free_stage2_pgd(kvm);
1942 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
1943 struct kvm_memory_slot *slot)
1945 gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
1946 phys_addr_t size = slot->npages << PAGE_SHIFT;
1948 spin_lock(&kvm->mmu_lock);
1949 unmap_stage2_range(kvm, gpa, size);
1950 spin_unlock(&kvm->mmu_lock);
1954 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
1957 * - S/W ops are local to a CPU (not broadcast)
1958 * - We have line migration behind our back (speculation)
1959 * - System caches don't support S/W at all (damn!)
1961 * In the face of the above, the best we can do is to try and convert
1962 * S/W ops to VA ops. Because the guest is not allowed to infer the
1963 * S/W to PA mapping, it can only use S/W to nuke the whole cache,
1964 * which is a rather good thing for us.
1966 * Also, it is only used when turning caches on/off ("The expected
1967 * usage of the cache maintenance instructions that operate by set/way
1968 * is associated with the cache maintenance instructions associated
1969 * with the powerdown and powerup of caches, if this is required by
1970 * the implementation.").
1972 * We use the following policy:
1974 * - If we trap a S/W operation, we enable VM trapping to detect
1975 * caches being turned on/off, and do a full clean.
1977 * - We flush the caches on both caches being turned on and off.
1979 * - Once the caches are enabled, we stop trapping VM ops.
1981 void kvm_set_way_flush(struct kvm_vcpu *vcpu)
1983 unsigned long hcr = vcpu_get_hcr(vcpu);
1986 * If this is the first time we do a S/W operation
1987 * (i.e. HCR_TVM not set) flush the whole memory, and enable VM trapping.
1990 * Otherwise, rely on the VM trapping to wait for the MMU +
1991 * Caches to be turned off. At that point, we'll be able to
1992 * clean the caches again.
1994 if (!(hcr & HCR_TVM)) {
1995 trace_kvm_set_way_flush(*vcpu_pc(vcpu),
1996 vcpu_has_cache_enabled(vcpu));
1997 stage2_flush_vm(vcpu->kvm);
1998 vcpu_set_hcr(vcpu, hcr | HCR_TVM);
2002 void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
2004 bool now_enabled = vcpu_has_cache_enabled(vcpu);
2007 * If switching the MMU+caches on, need to invalidate the caches.
2008 * If switching it off, need to clean the caches.
2009 * Clean + invalidate does the trick always.
2011 if (now_enabled != was_enabled)
2012 stage2_flush_vm(vcpu->kvm);
2014 /* Caches are now on, stop trapping VM ops (until a S/W op) */
2016 vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);
2018 trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);