// SPDX-License-Identifier: GPL-2.0
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kvm_host.h>
#include <linux/page-flags.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/kvm_mmu.h>
static inline void kvm_ptw_prepare(struct kvm *kvm, kvm_ptw_ctx *ctx)
        ctx->level = kvm->arch.root_level;
        ctx->invalid_ptes = kvm->arch.invalid_ptes;
        ctx->pte_shifts = kvm->arch.pte_shifts;
        ctx->pgtable_shift = ctx->pte_shifts[ctx->level];
        ctx->invalid_entry = ctx->invalid_ptes[ctx->level];
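
/*
 * The walk context caches the GPA page table's root level together with the
 * per-level invalid entries and pte shifts, so the walkers below can detect
 * missing entries and leaf levels without going back to struct kvm.
 */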
 * Mark a range of guest physical address space old (all accesses fault) in the
 * VM's GPA page table to allow detection of commonly used pages.
static int kvm_mkold_pte(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx)
        if (kvm_pte_young(*pte)) {
                *pte = kvm_pte_mkold(*pte);
 * Mark a range of guest physical address space clean (writes fault) in the VM's
 * GPA page table to allow dirty page tracking.
static int kvm_mkclean_pte(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx)
         * For kvm_arch_mmu_enable_log_dirty_pt_masked(), the range given by
         * mask, start and end may cross a huge page: for the first huge page
         * the parameter addr equals start, but for any following huge page
         * addr is the base address of that huge page rather than start or end.
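         *
         * The mask covers at most BITS_PER_LONG small pages starting at
         * ctx->gfn, so a small pte is skipped unless its bit is set in the
         * mask, while huge ptes span more than the mask window and are always
         * processed.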
        if ((ctx->flag & _KVM_HAS_PGMASK) && !kvm_pte_huge(val)) {
                offset = (addr >> PAGE_SHIFT) - ctx->gfn;
                if (!(BIT(offset) & ctx->mask))
         * No need to split the huge page now, just set the write-protect pte
         * bit; the huge page will be split on the next write fault.
        if (kvm_pte_dirty(val)) {
                *pte = kvm_pte_mkclean(val);
static int kvm_flush_pte(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx)
                kvm->stat.hugepages--;
        *pte = ctx->invalid_entry;
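        /*
         * Invalidating the entry is enough here; page-table pages that become
         * fully invalid are queued on ctx->list by the walkers (when
         * _KVM_FLUSH_PGTABLE is set) and freed by the caller after the walk.
         */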
 * kvm_pgd_alloc() - Allocate and initialise a KVM GPA page directory.
 * Allocate a blank KVM GPA page directory (PGD) for representing guest physical
 * to host physical page mappings.
 * Returns:     Pointer to new KVM GPA page directory.
 *              NULL on allocation failure.
kvm_pte_t *kvm_pgd_alloc(void)
        pgd = (kvm_pte_t *)__get_free_pages(GFP_KERNEL, 0);
                pgd_init((void *)pgd);
static void _kvm_pte_init(void *addr, unsigned long val)
        unsigned long *p, *end;
        p = (unsigned long *)addr;
        end = p + PTRS_PER_PTE;
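        /* Initialise each of the PTRS_PER_PTE slots to val, the invalid entry for this table's level. */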
 * Caller must hold kvm->mmu_lock.
 * Walk the page tables of kvm to find the PTE corresponding to the
 * address @addr. If page tables don't exist for @addr, they will be created
 * from the MMU cache if @cache is not NULL.
static kvm_pte_t *kvm_populate_gpa(struct kvm *kvm,
                                struct kvm_mmu_memory_cache *cache,
                                unsigned long addr, int level)
        kvm_pte_t *entry, *child;
        kvm_ptw_prepare(kvm, &ctx);
        child = kvm->arch.pgd;
        while (ctx.level > level) {
                entry = kvm_pgtable_offset(&ctx, child, addr);
                if (kvm_pte_none(&ctx, entry)) {
                        child = kvm_mmu_memory_cache_alloc(cache);
                        _kvm_pte_init(child, ctx.invalid_ptes[ctx.level - 1]);
                        kvm_set_pte(entry, __pa(child));
                } else if (kvm_pte_huge(*entry)) {
                child = (kvm_pte_t *)__va(PHYSADDR(*entry));
        entry = kvm_pgtable_offset(&ctx, child, addr);
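        /* entry now points at the pte (or huge pmd) slot covering @addr at the requested level. */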
 * Page walker for VM shadow mmu at the last level.
 * The last level is a small pte page or a huge pmd page.
static int kvm_ptw_leaf(kvm_pte_t *dir, phys_addr_t addr, phys_addr_t end, kvm_ptw_ctx *ctx)
        phys_addr_t next, start, size;
        struct list_head *list;
        kvm_pte_t *entry, *child;
        child = (kvm_pte_t *)__va(PHYSADDR(*dir));
        entry = kvm_pgtable_offset(ctx, child, addr);
                next = addr + (0x1UL << ctx->pgtable_shift);
                if (!kvm_pte_present(ctx, entry))
                ret |= ctx->ops(entry, addr, ctx);
        } while (entry++, addr = next, addr < end);
        if (kvm_need_flush(ctx)) {
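                /*
                 * A full table at this level holds PAGE_SIZE / 8 entries, each
                 * covering 1 << pgtable_shift bytes of GPA. If the walked range
                 * spans the whole table, the emptied pte page is queued on
                 * ctx->list (its memory reused as a list_head) and freed by the
                 * caller after mmu_lock is dropped.
                 */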
                size = 0x1UL << (ctx->pgtable_shift + PAGE_SHIFT - 3);
                if (start + size == end) {
                        list = (struct list_head *)child;
                        list_add_tail(list, &ctx->list);
                        *dir = ctx->invalid_ptes[ctx->level + 1];
 * Page walker for VM shadow mmu at page table dir level
static int kvm_ptw_dir(kvm_pte_t *dir, phys_addr_t addr, phys_addr_t end, kvm_ptw_ctx *ctx)
        phys_addr_t next, start, size;
        struct list_head *list;
        kvm_pte_t *entry, *child;
        child = (kvm_pte_t *)__va(PHYSADDR(*dir));
        entry = kvm_pgtable_offset(ctx, child, addr);
                next = kvm_pgtable_addr_end(ctx, addr, end);
                if (!kvm_pte_present(ctx, entry))
                if (kvm_pte_huge(*entry)) {
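                        /* A huge entry at this level is itself a leaf mapping: apply the op directly instead of descending. */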
                        ret |= ctx->ops(entry, addr, ctx);
                        ret |= kvm_ptw_leaf(entry, addr, next, ctx);
                        ret |= kvm_ptw_dir(entry, addr, next, ctx);
        } while (entry++, addr = next, addr < end);
        if (kvm_need_flush(ctx)) {
                size = 0x1UL << (ctx->pgtable_shift + PAGE_SHIFT - 3);
                if (start + size == end) {
                        list = (struct list_head *)child;
                        list_add_tail(list, &ctx->list);
                        *dir = ctx->invalid_ptes[ctx->level + 1];
 * Page walker for VM shadow mmu at page root table
static int kvm_ptw_top(kvm_pte_t *dir, phys_addr_t addr, phys_addr_t end, kvm_ptw_ctx *ctx)
        entry = kvm_pgtable_offset(ctx, dir, addr);
                next = kvm_pgtable_addr_end(ctx, addr, end);
                if (!kvm_pte_present(ctx, entry))
                ret |= kvm_ptw_dir(entry, addr, next, ctx);
        } while (entry++, addr = next, addr < end);
 * kvm_flush_range() - Flush a range of guest physical addresses.
 * @start_gfn:  Guest frame number of first page in GPA range to flush.
 * @end_gfn:    Guest frame number of last page in GPA range to flush.
 * @lock:       Whether to hold mmu_lock or not
 * Flushes a range of GPA mappings from the GPA page tables.
static void kvm_flush_range(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn, int lock)
        struct list_head *pos, *temp;
        ctx.ops = kvm_flush_pte;
        ctx.flag = _KVM_FLUSH_PGTABLE;
        kvm_ptw_prepare(kvm, &ctx);
        INIT_LIST_HEAD(&ctx.list);
                spin_lock(&kvm->mmu_lock);
                ret = kvm_ptw_top(kvm->arch.pgd, start_gfn << PAGE_SHIFT,
                                end_gfn << PAGE_SHIFT, &ctx);
                spin_unlock(&kvm->mmu_lock);
                ret = kvm_ptw_top(kvm->arch.pgd, start_gfn << PAGE_SHIFT,
                                end_gfn << PAGE_SHIFT, &ctx);
        /* Flush vpid for each vCPU individually */
                kvm_flush_remote_tlbs(kvm);
         * Free the pte table pages after mmu_lock is dropped; the freed
         * pte table pages are linked together on ctx.list.
        list_for_each_safe(pos, temp, &ctx.list) {
                free_page((unsigned long)pos);
 * kvm_mkclean_gpa_pt() - Make a range of guest physical addresses clean.
 * @start_gfn:  Guest frame number of first page in GPA range to flush.
 * @end_gfn:    Guest frame number of last page in GPA range to flush.
 * Make a range of GPA mappings clean so that guest writes will fault and
 * trigger dirty page logging.
 * The caller must hold the @kvm->mmu_lock spinlock.
 * Returns:     Whether any GPA mappings were modified, which would require
 *              derived mappings (GVA page tables & TLB entries) to be
 *              invalidated.
static int kvm_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
        ctx.ops = kvm_mkclean_pte;
        kvm_ptw_prepare(kvm, &ctx);
        return kvm_ptw_top(kvm->arch.pgd, start_gfn << PAGE_SHIFT, end_gfn << PAGE_SHIFT, &ctx);
 * kvm_arch_mmu_enable_log_dirty_pt_masked() - write protect dirty pages
 * @kvm:        The KVM pointer
 * @slot:       The memory slot associated with mask
 * @gfn_offset: The gfn offset in memory slot
 * @mask:       The mask of dirty pages at offset 'gfn_offset' in this memory
 *              slot to be write protected
 * Walks the bits set in mask and write protects the associated ptes. The
 * caller must hold @kvm->mmu_lock.
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
                struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask)
        gfn_t base_gfn = slot->base_gfn + gfn_offset;
        gfn_t start = base_gfn + __ffs(mask);
        gfn_t end = base_gfn + __fls(mask) + 1;
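
        /*
         * start/end only bound the set bits in mask; kvm_mkclean_pte() skips
         * any pte within the range whose bit in ctx.mask is clear.
         */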
        ctx.ops = kvm_mkclean_pte;
        ctx.flag = _KVM_HAS_PGMASK;
        kvm_ptw_prepare(kvm, &ctx);
        kvm_ptw_top(kvm->arch.pgd, start << PAGE_SHIFT, end << PAGE_SHIFT, &ctx);
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *old,
                                   const struct kvm_memory_slot *new,
                                   enum kvm_mr_change change)
         * If dirty page logging is enabled, write protect all pages in the slot
         * ready for dirty logging.
         * There is no need to do this in any of the following cases:
         * CREATE:      No dirty mappings will already exist.
         * MOVE/DELETE: The old mappings will already have been cleaned up by
         *              kvm_arch_flush_shadow_memslot()
        if (change == KVM_MR_FLAGS_ONLY &&
            (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
             new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
                spin_lock(&kvm->mmu_lock);
                /* Write protect GPA page table entries */
                needs_flush = kvm_mkclean_gpa_pt(kvm, new->base_gfn,
                                new->base_gfn + new->npages);
                spin_unlock(&kvm->mmu_lock);
                        kvm_flush_remote_tlbs(kvm);
void kvm_arch_flush_shadow_all(struct kvm *kvm)
        kvm_flush_range(kvm, 0, kvm->arch.gpa_size >> PAGE_SHIFT, 0);
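        /* The whole GPA space (kvm->arch.gpa_size) is flushed; lock == 0 means the walk itself runs without taking mmu_lock. */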
void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
         * The slot has been made invalid (ready for moving or deletion), so we
         * need to ensure that it can no longer be accessed by any guest vCPUs.
        kvm_flush_range(kvm, slot->base_gfn, slot->base_gfn + slot->npages, 1);
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
        ctx.ops = kvm_flush_pte;
        kvm_ptw_prepare(kvm, &ctx);
        INIT_LIST_HEAD(&ctx.list);
        return kvm_ptw_top(kvm->arch.pgd, range->start << PAGE_SHIFT,
                        range->end << PAGE_SHIFT, &ctx);
bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
        unsigned long prot_bits;
        kvm_pfn_t pfn = pte_pfn(range->arg.pte);
        gpa_t gpa = range->start << PAGE_SHIFT;
        ptep = kvm_populate_gpa(kvm, NULL, gpa, 0);
        /* Replacing an absent or old page doesn't need flushes */
        if (!kvm_pte_present(NULL, ptep) || !kvm_pte_young(*ptep)) {
                kvm_set_pte(ptep, 0);
        /* Fill new pte if write protected or page migrated */
        prot_bits = _PAGE_PRESENT | __READABLE;
        prot_bits |= _CACHE_MASK & pte_val(range->arg.pte);
         * Set _PAGE_WRITE or _PAGE_DIRTY only if both the old and the new pte
         * carry them: _PAGE_WRITE so that kvm_map_page_fast() can handle the
         * next write fault, _PAGE_DIRTY because the gpa has already been
         * recorded as a dirty page.
        prot_bits |= __WRITEABLE & *ptep & pte_val(range->arg.pte);
        kvm_set_pte(ptep, kvm_pfn_pte(pfn, __pgprot(prot_bits)));
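        /* A present and young mapping was replaced above, so the return value tells the caller to flush the TLB for this range. */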
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
        ctx.ops = kvm_mkold_pte;
        kvm_ptw_prepare(kvm, &ctx);
        return kvm_ptw_top(kvm->arch.pgd, range->start << PAGE_SHIFT,
                        range->end << PAGE_SHIFT, &ctx);
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
        gpa_t gpa = range->start << PAGE_SHIFT;
        kvm_pte_t *ptep = kvm_populate_gpa(kvm, NULL, gpa, 0);
        if (ptep && kvm_pte_present(NULL, ptep) && kvm_pte_young(*ptep))
 * kvm_map_page_fast() - Fast path GPA fault handler.
 * @vcpu:       vCPU pointer.
 * @gpa:        Guest physical address of fault.
 * @write:      Whether the fault was due to a write.
 * Perform fast path GPA fault handling, doing all that can be done without
 * calling into KVM. This handles marking old pages young (for idle page
 * tracking), and dirtying of clean pages (for dirty page logging).
 * Returns:     0 on success, in which case we can update derived mappings and
 *              resume guest execution.
 *              -EFAULT on failure due to absent GPA mapping or write to
 *              read-only page, in which case KVM must be consulted.
static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
        kvm_pte_t *ptep, changed, new;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_memory_slot *slot;
        spin_lock(&kvm->mmu_lock);
        /* Fast path - just check GPA page table for an existing entry */
        ptep = kvm_populate_gpa(kvm, NULL, gpa, 0);
        if (!ptep || !kvm_pte_present(NULL, ptep)) {
        /* Track access to pages marked old */
        if (!kvm_pte_young(new))
                new = kvm_pte_mkyoung(new);
        /* call kvm_set_pfn_accessed() after unlock */
        if (write && !kvm_pte_dirty(new)) {
                if (!kvm_pte_write(new)) {
                if (kvm_pte_huge(new)) {
                         * Do not set write permission when dirty logging is
                         * enabled for HugePages
                        slot = gfn_to_memslot(kvm, gfn);
                        if (kvm_slot_dirty_track_enabled(slot)) {
                /* Track dirtying of writeable pages */
                new = kvm_pte_mkdirty(new);
        changed = new ^ (*ptep);
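        /* 'changed' records which attribute bits the fast path flipped, so only the matching pfn accounting runs after the unlock below. */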
                kvm_set_pte(ptep, new);
                pfn = kvm_pte_pfn(new);
        spin_unlock(&kvm->mmu_lock);
         * FIXME: the pfn may be freed after mmu_lock is dropped;
         * could a kvm_try_get_pfn(pfn)/kvm_release_pfn() pair prevent this?
        if (kvm_pte_young(changed))
                kvm_set_pfn_accessed(pfn);
        if (kvm_pte_dirty(changed)) {
                mark_page_dirty(kvm, gfn);
                kvm_set_pfn_dirty(pfn);
        spin_unlock(&kvm->mmu_lock);
static bool fault_supports_huge_mapping(struct kvm_memory_slot *memslot,
                                unsigned long hva, unsigned long map_size, bool write)
        hva_t uaddr_start, uaddr_end;
        /* Disable dirty logging on HugePages */
        if (kvm_slot_dirty_track_enabled(memslot) && write)
        size = memslot->npages * PAGE_SIZE;
        gpa_start = memslot->base_gfn << PAGE_SHIFT;
        uaddr_start = memslot->userspace_addr;
        uaddr_end = uaddr_start + size;
         * Pages belonging to memslots that don't have the same alignment
         * within a PMD for userspace and GPA cannot be mapped with stage-2
         * PMD entries, because we'll end up mapping the wrong pages.
         * Consider a layout like the following:
         *    memslot->userspace_addr:
         *    +-----+--------------------+--------------------+---+
         *    |abcde|fgh  Stage-1 block  |    Stage-1 block tv|xyz|
         *    +-----+--------------------+--------------------+---+
         *
         *    memslot->base_gfn << PAGE_SHIFT:
         *      +---+--------------------+--------------------+-----+
         *      |abc|def  Stage-2 block  |    Stage-2 block   |tvxyz|
         *      +---+--------------------+--------------------+-----+
         * If we create those stage-2 blocks, we'll end up with this incorrect
        if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
         * Next, let's make sure we're not trying to map anything not covered
         * by the memslot. This means we have to prohibit block size mappings
         * for the beginning and end of a non-block aligned and non-block sized
         * memory slot (illustrated by the head and tail parts of the
         * userspace view above containing pages 'abcde' and 'xyz',
         * Note that it doesn't matter if we do the check using the
         * userspace_addr or the base_gfn, as both are equally aligned (per
         * the check above) and equally sized.
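         *
         * In other words, the map_size-aligned block containing hva must lie
         * entirely inside the memslot's userspace address range.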
        return (hva & ~(map_size - 1)) >= uaddr_start &&
               (hva & ~(map_size - 1)) + map_size <= uaddr_end;
 * Lookup the mapping level for @gfn in the current mm.
 * WARNING! Use of host_pfn_mapping_level() requires the caller and the end
 * consumer to be tied into KVM's handlers for MMU notifier events!
 * There are several ways to safely use this helper:
 * - Check mmu_invalidate_retry_hva() after grabbing the mapping level, before
 *   consuming it. In this case, mmu_lock doesn't need to be held during the
 *   lookup, but it does need to be held while checking the MMU notifier.
 * - Hold mmu_lock AND ensure there is no in-progress MMU notifier invalidation
 *   event for the hva. This can be done by explicitly checking the MMU notifier
 *   or by ensuring that KVM already has a valid mapping that covers the hva.
 * - Do not use the result to install new mappings, e.g. use the host mapping
 *   level only to decide whether or not to zap an entry. In this case, it's
 *   not required to hold mmu_lock (though it's highly likely the caller will
 *   want to hold mmu_lock anyway, e.g. to modify SPTEs).
 * Note! The lookup can still race with modifications to host page tables, but
 * the above "rules" ensure KVM will not _consume_ the result of the walk if a
 * race with the primary MMU occurs.
static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
                                const struct kvm_memory_slot *slot)
         * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()
         * is not solely for performance, it's also necessary to avoid the
         * "writable" check in __gfn_to_hva_many(), which will always fail on
         * read-only memslots due to gfn_to_hva() assuming writes. Earlier
         * page fault steps have already verified the guest isn't writing a
         * read-only memslot.
        hva = __gfn_to_hva_memslot(slot, gfn);
         * Disable IRQs to prevent concurrent tear down of host page tables,
         * e.g. if the primary MMU promotes a P*D to a huge page and then frees
         * the original page table.
        local_irq_save(flags);
         * Read each entry once. As above, a non-leaf entry can be promoted to
         * a huge page _during_ this walk. Re-reading the entry could send the
         * walk into the weeds, e.g. p*d_large() returns false (sees the old
         * value) and then p*d_offset() walks into the target huge page instead
         * of the old page table (sees the new value).
        pgd = READ_ONCE(*pgd_offset(kvm->mm, hva));
        p4d = READ_ONCE(*p4d_offset(&pgd, hva));
        if (p4d_none(p4d) || !p4d_present(p4d))
        pud = READ_ONCE(*pud_offset(&p4d, hva));
        if (pud_none(pud) || !pud_present(pud))
        pmd = READ_ONCE(*pmd_offset(&pud, hva));
        if (pmd_none(pmd) || !pmd_present(pmd))
        if (kvm_pte_huge(pmd_val(pmd)))
        local_irq_restore(flags);
static kvm_pte_t *kvm_split_huge(struct kvm_vcpu *vcpu, kvm_pte_t *ptep, gfn_t gfn)
        kvm_pte_t val, *child;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_mmu_memory_cache *memcache;
        memcache = &vcpu->arch.mmu_page_cache;
        child = kvm_mmu_memory_cache_alloc(memcache);
        val = kvm_pte_mksmall(*ptep);
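        /*
         * Each child pte starts from the huge mapping's attributes with the
         * huge bit cleared; successive slots advance the pfn by one page.
         */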
        for (i = 0; i < PTRS_PER_PTE; i++) {
                kvm_set_pte(child + i, val);
        /* The later kvm_flush_tlb_gpa() will flush hugepage tlb */
        kvm_set_pte(ptep, __pa(child));
        kvm->stat.hugepages--;
        kvm->stat.pages += PTRS_PER_PTE;
        return child + (gfn & (PTRS_PER_PTE - 1));
 * kvm_map_page() - Map a guest physical page.
 * @vcpu:       vCPU pointer.
 * @gpa:        Guest physical address of fault.
 * @write:      Whether the fault was due to a write.
 * Handle GPA faults by creating a new GPA mapping (or updating an existing
 * This takes care of marking pages young or dirty (idle/dirty page tracking),
 * asking KVM for the corresponding PFN, and creating a mapping in the GPA page
 * tables. Derived mappings (GVA page tables and TLBs) must be handled by the
 * Returns:     0 on success
 *              -EFAULT if there is no memory region at @gpa or a write was
 *              attempted to a read-only memory region. This is usually handled
static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
        int srcu_idx, err, retry_no = 0, level;
        unsigned long hva, mmu_seq, prot_bits;
        kvm_pte_t *ptep, new_pte;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_memory_slot *memslot;
        struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
        /* Try the fast path to handle old / clean pages */
        srcu_idx = srcu_read_lock(&kvm->srcu);
        err = kvm_map_page_fast(vcpu, gpa, write);
        memslot = gfn_to_memslot(kvm, gfn);
        hva = gfn_to_hva_memslot_prot(memslot, gfn, &writeable);
        if (kvm_is_error_hva(hva) || (write && !writeable)) {
        /* We need a minimum of cached pages ready for page table creation */
        err = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
         * Used to check for invalidations in progress, of the pfn that is
         * returned by gfn_to_pfn_prot() below.
        mmu_seq = kvm->mmu_invalidate_seq;
         * Ensure the read of mmu_invalidate_seq isn't reordered with PTE reads in
         * gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
         * risk the page we get a reference to getting unmapped before we have a
         * chance to grab the mmu_lock without mmu_invalidate_retry() noticing.
         * This smp_rmb() pairs with the effective smp_wmb() of the combination
         * of the pte_unmap_unlock() after the PTE is zapped, and the
         * spin_lock() in kvm_mmu_invalidate_<page|range_end>() before
         * mmu_invalidate_seq is incremented.
        /* Slow path - ask KVM core whether we can access this GPA */
        pfn = gfn_to_pfn_prot(kvm, gfn, write, &writeable);
        if (is_error_noslot_pfn(pfn)) {
        /* Check if an invalidation has taken place since we got pfn */
        spin_lock(&kvm->mmu_lock);
        if (mmu_invalidate_retry_hva(kvm, mmu_seq, hva)) {
                 * This can happen when mappings are changed asynchronously, but
                 * also synchronously if a COW is triggered by gfn_to_pfn_prot().
                spin_unlock(&kvm->mmu_lock);
                kvm_release_pfn_clean(pfn);
                if (retry_no > 100) {
         * For emulated devices such as virtio devices, the actual cache
         * attribute is determined by the physical machine.
         * For a passed-through physical device, it should be uncacheable.
        prot_bits = _PAGE_PRESENT | __READABLE;
                prot_bits |= _CACHE_CC;
                prot_bits |= _CACHE_SUC;
                prot_bits |= _PAGE_WRITE;
                        prot_bits |= __WRITEABLE;
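                /*
                 * __WRITEABLE includes the dirty bit, so it is only set on an
                 * actual write fault; a page mapped on a read fault stays
                 * clean and the next guest write goes through
                 * kvm_map_page_fast() to mark it dirty.
                 */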
        /* Disable dirty logging on HugePages */
        if (!fault_supports_huge_mapping(memslot, hva, PMD_SIZE, write)) {
                level = host_pfn_mapping_level(kvm, gfn, memslot);
                        gfn = gfn & ~(PTRS_PER_PTE - 1);
                        pfn = pfn & ~(PTRS_PER_PTE - 1);
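                        /* Align gfn and pfn down to the huge-page boundary so a single pmd entry maps the whole naturally aligned block. */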
        /* Ensure page tables are allocated */
        ptep = kvm_populate_gpa(kvm, memcache, gpa, level);
        new_pte = kvm_pfn_pte(pfn, __pgprot(prot_bits));
                new_pte = kvm_pte_mkhuge(new_pte);
                 * The previous pmd entry was invalid_pte_table, so there may be
                 * stale small-page TLB entries for this range; flush them on
                 * the current vcpu.
                kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
                ++kvm->stat.hugepages;
        } else if (kvm_pte_huge(*ptep) && write)
                ptep = kvm_split_huge(vcpu, ptep, gfn);
        kvm_set_pte(ptep, new_pte);
        spin_unlock(&kvm->mmu_lock);
        if (prot_bits & _PAGE_DIRTY) {
                mark_page_dirty_in_slot(kvm, memslot, gfn);
                kvm_set_pfn_dirty(pfn);
        kvm_set_pfn_accessed(pfn);
        kvm_release_pfn_clean(pfn);
        srcu_read_unlock(&kvm->srcu, srcu_idx);
int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
        ret = kvm_map_page(vcpu, gpa, write);
        /* Invalidate this entry in the TLB */
        kvm_flush_tlb_gpa(vcpu, gpa);
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
int kvm_arch_prepare_memory_region(struct kvm *kvm, const struct kvm_memory_slot *old,
                                   struct kvm_memory_slot *new, enum kvm_mr_change change)
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
                                        const struct kvm_memory_slot *memslot)
        kvm_flush_remote_tlbs(kvm);