2 * Kernel-based Virtual Machine driver for Linux
4 * This module enables machines with Intel VT-x extensions to run virtual
5 * machines without emulation or binary translation.
9 * Copyright (C) 2006 Qumranet, Inc.
10 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
13 * Yaniv Kamay <yaniv@qumranet.com>
14 * Avi Kivity <avi@qumranet.com>
16 * This work is licensed under the terms of the GNU GPL, version 2. See
17 * the COPYING file in the top-level directory.
24 #include "kvm_cache_regs.h"
27 #include <linux/kvm_host.h>
28 #include <linux/types.h>
29 #include <linux/string.h>
31 #include <linux/highmem.h>
32 #include <linux/moduleparam.h>
33 #include <linux/export.h>
34 #include <linux/swap.h>
35 #include <linux/hugetlb.h>
36 #include <linux/compiler.h>
37 #include <linux/srcu.h>
38 #include <linux/slab.h>
39 #include <linux/uaccess.h>
40 #include <linux/kthread.h>
43 #include <asm/cmpxchg.h>
46 #include <asm/kvm_page_track.h>
48 extern bool itlb_multihit_kvm_mitigation;
50 static int __read_mostly nx_huge_pages = -1;
51 static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
53 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
54 static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp);
56 static struct kernel_param_ops nx_huge_pages_ops = {
57 .set = set_nx_huge_pages,
58 .get = param_get_bool,
61 static struct kernel_param_ops nx_huge_pages_recovery_ratio_ops = {
62 .set = set_nx_huge_pages_recovery_ratio,
63 .get = param_get_uint,
66 module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
67 __MODULE_PARM_TYPE(nx_huge_pages, "bool");
68 module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_ratio_ops,
69 &nx_huge_pages_recovery_ratio, 0644);
70 __MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
 * When this variable is set to true it enables Two-Dimensional Paging,
 * where the hardware walks two page tables:
 * 1. the guest-virtual to guest-physical table
 * 2. while doing 1., the guest-physical to host-physical table
 * If the hardware supports that, we don't need to do shadow paging.
79 bool tdp_enabled = false;
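/*
 * Illustration (not specific to this file): with TDP the hardware itself
 * performs the nested walk -- EPT on Intel VT-x and NPT on AMD-V are the
 * two implementations -- so KVM only has to maintain the guest-physical
 * to host-physical tables instead of full shadow page tables.
 */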
83 AUDIT_POST_PAGE_FAULT,
94 module_param(dbg, bool, 0644);
96 #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
97 #define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
98 #define MMU_WARN_ON(x) WARN_ON(x)
100 #define pgprintk(x...) do { } while (0)
101 #define rmap_printk(x...) do { } while (0)
102 #define MMU_WARN_ON(x) do { } while (0)
105 #define PTE_PREFETCH_NUM 8
107 #define PT_FIRST_AVAIL_BITS_SHIFT 10
108 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
110 #define PT64_LEVEL_BITS 9
112 #define PT64_LEVEL_SHIFT(level) \
113 (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
115 #define PT64_INDEX(address, level)\
116 (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
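/*
 * Worked example of the 64-bit index arithmetic above, assuming the usual
 * PAGE_SHIFT of 12: level 1 indexes with address bits 12-20, level 2 with
 * bits 21-29, level 3 with bits 30-38 and level 4 with bits 39-47, e.g.
 * PT64_INDEX(addr, 2) == (addr >> 21) & 511.
 */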
119 #define PT32_LEVEL_BITS 10
121 #define PT32_LEVEL_SHIFT(level) \
122 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
124 #define PT32_LVL_OFFSET_MASK(level) \
125 (PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
126 * PT32_LEVEL_BITS))) - 1))
128 #define PT32_INDEX(address, level)\
129 (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
132 #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
133 #define PT64_DIR_BASE_ADDR_MASK \
134 (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
135 #define PT64_LVL_ADDR_MASK(level) \
136 (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
137 * PT64_LEVEL_BITS))) - 1))
138 #define PT64_LVL_OFFSET_MASK(level) \
139 (PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
140 * PT64_LEVEL_BITS))) - 1))
142 #define PT32_BASE_ADDR_MASK PAGE_MASK
143 #define PT32_DIR_BASE_ADDR_MASK \
144 (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
145 #define PT32_LVL_ADDR_MASK(level) \
146 (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
147 * PT32_LEVEL_BITS))) - 1))
149 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \
150 | shadow_x_mask | shadow_nx_mask)
152 #define ACC_EXEC_MASK 1
153 #define ACC_WRITE_MASK PT_WRITABLE_MASK
154 #define ACC_USER_MASK PT_USER_MASK
155 #define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
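/*
 * For illustration, with the usual x86 PTE bit layout (writable = bit 1,
 * user = bit 2) the access summary is a 3-bit value: a gpte that is
 * executable, writable and user-accessible yields ACC_ALL == 0x7.
 */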
157 #include <trace/events/kvm.h>
159 #define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
160 #define SPTE_MMU_WRITEABLE (1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1))
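/*
 * Sketch of the meaning of the two software-available bits above (spte
 * bits 10 and 11): SPTE_HOST_WRITEABLE records that the host userspace
 * mapping was writable when the spte was created, while SPTE_MMU_WRITEABLE
 * records that KVM itself is willing to make the spte writable; a
 * write-protected spte may be made writable locklessly only when both
 * bits are set (see spte_is_locklessly_modifiable()).
 */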
162 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
164 /* make pte_list_desc fit well in cache line */
165 #define PTE_LIST_EXT 3
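/*
 * For example, on a 64-bit host a pte_list_desc is three spte pointers
 * plus the "more" link, i.e. 4 * 8 = 32 bytes, so two descriptors share
 * one 64-byte cache line.
 */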
168 * Return values of handle_mmio_page_fault and mmu.page_fault:
169 * RET_PF_RETRY: let CPU fault again on the address.
170 * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
172 * For handle_mmio_page_fault only:
173 * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
181 struct pte_list_desc {
182 u64 *sptes[PTE_LIST_EXT];
183 struct pte_list_desc *more;
186 struct kvm_shadow_walk_iterator {
194 #define for_each_shadow_entry(_vcpu, _addr, _walker) \
195 for (shadow_walk_init(&(_walker), _vcpu, _addr); \
196 shadow_walk_okay(&(_walker)); \
197 shadow_walk_next(&(_walker)))
199 #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte) \
200 for (shadow_walk_init(&(_walker), _vcpu, _addr); \
201 shadow_walk_okay(&(_walker)) && \
202 ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
203 __shadow_walk_next(&(_walker), spte))
205 static struct kmem_cache *pte_list_desc_cache;
206 static struct kmem_cache *mmu_page_header_cache;
207 static struct percpu_counter kvm_total_used_mmu_pages;
209 static u64 __read_mostly shadow_nx_mask;
210 static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */
211 static u64 __read_mostly shadow_user_mask;
212 static u64 __read_mostly shadow_accessed_mask;
213 static u64 __read_mostly shadow_dirty_mask;
214 static u64 __read_mostly shadow_mmio_mask;
215 static u64 __read_mostly shadow_present_mask;
217 static void mmu_spte_set(u64 *sptep, u64 spte);
218 static bool is_executable_pte(u64 spte);
219 static void mmu_free_roots(struct kvm_vcpu *vcpu);
221 #define CREATE_TRACE_POINTS
222 #include "mmutrace.h"
224 void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask)
226 shadow_mmio_mask = mmio_mask;
228 EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
230 static bool is_nx_huge_page_enabled(void)
232 return READ_ONCE(nx_huge_pages);
236 * the low bit of the generation number is always presumed to be zero.
237 * This disables mmio caching during memslot updates. The concept is
238 * similar to a seqcount but instead of retrying the access we just punt
239 * and ignore the cache.
241 * spte bits 3-11 are used as bits 1-9 of the generation number,
242 * the bits 52-61 are used as bits 10-19 of the generation number.
244 #define MMIO_SPTE_GEN_LOW_SHIFT 2
245 #define MMIO_SPTE_GEN_HIGH_SHIFT 52
247 #define MMIO_GEN_SHIFT 20
248 #define MMIO_GEN_LOW_SHIFT 10
249 #define MMIO_GEN_LOW_MASK ((1 << MMIO_GEN_LOW_SHIFT) - 2)
250 #define MMIO_GEN_MASK ((1 << MMIO_GEN_SHIFT) - 1)
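/*
 * Worked example of the packing described above: generation bits 1-9
 * (gen & MMIO_GEN_LOW_MASK) land in spte bits 3-11 after the << 2 shift,
 * and generation bits 10-19 land in spte bits 52-61. Bit 0 of the
 * generation is never stored, which is what invalidates cached MMIO
 * sptes while a memslot update is in progress.
 */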
252 static u64 generation_mmio_spte_mask(unsigned int gen)
256 WARN_ON(gen & ~MMIO_GEN_MASK);
258 mask = (gen & MMIO_GEN_LOW_MASK) << MMIO_SPTE_GEN_LOW_SHIFT;
259 mask |= ((u64)gen >> MMIO_GEN_LOW_SHIFT) << MMIO_SPTE_GEN_HIGH_SHIFT;
263 static unsigned int get_mmio_spte_generation(u64 spte)
267 spte &= ~shadow_mmio_mask;
269 gen = (spte >> MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_GEN_LOW_MASK;
270 gen |= (spte >> MMIO_SPTE_GEN_HIGH_SHIFT) << MMIO_GEN_LOW_SHIFT;
274 static unsigned int kvm_current_mmio_generation(struct kvm_vcpu *vcpu)
276 return kvm_vcpu_memslots(vcpu)->generation & MMIO_GEN_MASK;
279 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
282 unsigned int gen = kvm_current_mmio_generation(vcpu);
283 u64 mask = generation_mmio_spte_mask(gen);
285 access &= ACC_WRITE_MASK | ACC_USER_MASK;
286 mask |= shadow_mmio_mask | access | gfn << PAGE_SHIFT;
288 trace_mark_mmio_spte(sptep, gfn, access, gen);
289 mmu_spte_set(sptep, mask);
292 static bool is_mmio_spte(u64 spte)
294 return (spte & shadow_mmio_mask) == shadow_mmio_mask;
297 static gfn_t get_mmio_spte_gfn(u64 spte)
299 u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask;
300 return (spte & ~mask) >> PAGE_SHIFT;
303 static unsigned get_mmio_spte_access(u64 spte)
305 u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask;
306 return (spte & ~mask) & ~PAGE_MASK;
309 static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
310 kvm_pfn_t pfn, unsigned access)
312 if (unlikely(is_noslot_pfn(pfn))) {
313 mark_mmio_spte(vcpu, sptep, gfn, access);
320 static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
322 unsigned int kvm_gen, spte_gen;
324 kvm_gen = kvm_current_mmio_generation(vcpu);
325 spte_gen = get_mmio_spte_generation(spte);
327 trace_check_mmio_spte(spte, kvm_gen, spte_gen);
328 return likely(kvm_gen == spte_gen);
331 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
332 u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask)
334 shadow_user_mask = user_mask;
335 shadow_accessed_mask = accessed_mask;
336 shadow_dirty_mask = dirty_mask;
337 shadow_nx_mask = nx_mask;
338 shadow_x_mask = x_mask;
339 shadow_present_mask = p_mask;
341 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
343 static int is_cpuid_PSE36(void)
348 static int is_nx(struct kvm_vcpu *vcpu)
350 return vcpu->arch.efer & EFER_NX;
353 static int is_shadow_present_pte(u64 pte)
355 return (pte & 0xFFFFFFFFull) && !is_mmio_spte(pte);
358 static int is_large_pte(u64 pte)
360 return pte & PT_PAGE_SIZE_MASK;
363 static int is_last_spte(u64 pte, int level)
365 if (level == PT_PAGE_TABLE_LEVEL)
367 if (is_large_pte(pte))
372 static bool is_executable_pte(u64 spte)
374 return (spte & (shadow_x_mask | shadow_nx_mask)) == shadow_x_mask;
377 static kvm_pfn_t spte_to_pfn(u64 pte)
379 return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
382 static gfn_t pse36_gfn_delta(u32 gpte)
384 int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
386 return (gpte & PT32_DIR_PSE36_MASK) << shift;
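/*
 * Rough illustration, assuming the architectural PSE-36 layout in which
 * bits 13+ of a 4MB guest pte carry physical address bits 32+ and
 * PT32_DIR_PSE36_SHIFT is 13: the shift above is 32 - 13 - 12 == 7, so
 * the masked bits move up by 7 positions to become a gfn delta above 4GB.
 */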
390 static void __set_spte(u64 *sptep, u64 spte)
392 WRITE_ONCE(*sptep, spte);
395 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
397 WRITE_ONCE(*sptep, spte);
400 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
402 return xchg(sptep, spte);
405 static u64 __get_spte_lockless(u64 *sptep)
407 return ACCESS_ONCE(*sptep);
418 static void count_spte_clear(u64 *sptep, u64 spte)
420 struct kvm_mmu_page *sp = page_header(__pa(sptep));
422 if (is_shadow_present_pte(spte))
425 /* Ensure the spte is completely set before we increase the count */
427 sp->clear_spte_count++;
430 static void __set_spte(u64 *sptep, u64 spte)
432 union split_spte *ssptep, sspte;
434 ssptep = (union split_spte *)sptep;
435 sspte = (union split_spte)spte;
437 ssptep->spte_high = sspte.spte_high;
 * If we map the spte from nonpresent to present, we should store
 * the high bits first, then set the present bit, so the CPU cannot
 * fetch this spte while we are still setting it.
446 WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
449 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
451 union split_spte *ssptep, sspte;
453 ssptep = (union split_spte *)sptep;
454 sspte = (union split_spte)spte;
456 WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
 * If we map the spte from present to nonpresent, we should clear
 * the present bit first to avoid the vCPU fetching the old high bits.
464 ssptep->spte_high = sspte.spte_high;
465 count_spte_clear(sptep, spte);
468 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
470 union split_spte *ssptep, sspte, orig;
472 ssptep = (union split_spte *)sptep;
473 sspte = (union split_spte)spte;
475 /* xchg acts as a barrier before the setting of the high bits */
476 orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
477 orig.spte_high = ssptep->spte_high;
478 ssptep->spte_high = sspte.spte_high;
479 count_spte_clear(sptep, spte);
 * The idea of using a lightweight way to get the spte on x86_32 is taken
 * from gup_get_pte() (arch/x86/mm/gup.c).
 *
 * An spte TLB flush may be pending, because kvm_set_pte_rmapp
 * coalesces them and we are running outside of the MMU lock. Therefore
 * we need to protect against in-progress updates of the spte.
 *
 * Reading the spte while an update is in progress may get the old value
 * for the high part of the spte. The race is fine for a present->non-present
 * change (because the high part of the spte is ignored for a non-present spte),
 * but for a present->present change we must reread the spte.
 *
 * All such changes are done in two steps (present->non-present and
 * non-present->present), hence it is enough to count the number of
 * present->non-present updates: if it changed while reading the spte,
 * we might have hit the race. This is done using clear_spte_count.
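/*
 * A minimal sketch of the resulting read protocol in __get_spte_lockless()
 * below: sample clear_spte_count, read spte_low, read spte_high, then
 * re-check spte_low and the count; if either changed, a concurrent
 * present->non-present->present update may have been observed half way
 * through, so retry.
 */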
502 static u64 __get_spte_lockless(u64 *sptep)
504 struct kvm_mmu_page *sp = page_header(__pa(sptep));
505 union split_spte spte, *orig = (union split_spte *)sptep;
509 count = sp->clear_spte_count;
512 spte.spte_low = orig->spte_low;
515 spte.spte_high = orig->spte_high;
518 if (unlikely(spte.spte_low != orig->spte_low ||
519 count != sp->clear_spte_count))
526 static bool spte_is_locklessly_modifiable(u64 spte)
528 return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) ==
529 (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE);
532 static bool spte_has_volatile_bits(u64 spte)
 * Always atomically update the spte if it can be updated
 * out of mmu-lock: this ensures the dirty bit is not lost,
 * and it also helps us to get a stable is_writable_pte()
 * so that a TLB flush is not missed.
540 if (spte_is_locklessly_modifiable(spte))
543 if (!shadow_accessed_mask)
546 if (!is_shadow_present_pte(spte))
549 if ((spte & shadow_accessed_mask) &&
550 (!is_writable_pte(spte) || (spte & shadow_dirty_mask)))
556 static bool spte_is_bit_cleared(u64 old_spte, u64 new_spte, u64 bit_mask)
558 return (old_spte & bit_mask) && !(new_spte & bit_mask);
561 static bool spte_is_bit_changed(u64 old_spte, u64 new_spte, u64 bit_mask)
563 return (old_spte & bit_mask) != (new_spte & bit_mask);
/* Rules for using mmu_spte_set:
 * Set the sptep from nonpresent to present.
 * Note: the sptep being assigned *must* be either not present
 * or in a state where the hardware will not attempt to update the spte.
572 static void mmu_spte_set(u64 *sptep, u64 new_spte)
574 WARN_ON(is_shadow_present_pte(*sptep));
575 __set_spte(sptep, new_spte);
/* Rules for using mmu_spte_update:
 * Update the state bits; this means the mapped pfn is not changed.
 *
 * Whenever we overwrite a writable spte with a read-only one we
 * should flush remote TLBs. Otherwise rmap_write_protect
 * will find a read-only spte, even though the writable spte
 * might still be cached in a CPU's TLB; the return value indicates
 * this case.
587 static bool mmu_spte_update(u64 *sptep, u64 new_spte)
589 u64 old_spte = *sptep;
592 WARN_ON(!is_shadow_present_pte(new_spte));
594 if (!is_shadow_present_pte(old_spte)) {
595 mmu_spte_set(sptep, new_spte);
599 if (!spte_has_volatile_bits(old_spte))
600 __update_clear_spte_fast(sptep, new_spte);
602 old_spte = __update_clear_spte_slow(sptep, new_spte);
 * An spte updated out of mmu-lock is safe, since
 * we always atomically update it; see the comments in
 * spte_has_volatile_bits().
609 if (spte_is_locklessly_modifiable(old_spte) &&
610 !is_writable_pte(new_spte))
613 if (!shadow_accessed_mask) {
 * We don't set the page dirty when dropping a non-writable spte,
 * so do it now if the new spte is becoming non-writable.
619 kvm_set_pfn_dirty(spte_to_pfn(old_spte));
624 * Flush TLB when accessed/dirty bits are changed in the page tables,
625 * to guarantee consistency between TLB and page tables.
627 if (spte_is_bit_changed(old_spte, new_spte,
628 shadow_accessed_mask | shadow_dirty_mask))
631 if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask))
632 kvm_set_pfn_accessed(spte_to_pfn(old_spte));
633 if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask))
634 kvm_set_pfn_dirty(spte_to_pfn(old_spte));
 * Rules for using mmu_spte_clear_track_bits:
 * It sets the sptep from present to nonpresent and tracks the
 * state bits; it is used to clear a last-level sptep.
644 static int mmu_spte_clear_track_bits(u64 *sptep)
647 u64 old_spte = *sptep;
649 if (!spte_has_volatile_bits(old_spte))
650 __update_clear_spte_fast(sptep, 0ull);
652 old_spte = __update_clear_spte_slow(sptep, 0ull);
654 if (!is_shadow_present_pte(old_spte))
657 pfn = spte_to_pfn(old_spte);
 * KVM does not hold a refcount on the pages used by the
 * KVM MMU, so before reclaiming a page we should
 * unmap it from the MMU first.
664 WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
666 if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
667 kvm_set_pfn_accessed(pfn);
668 if (old_spte & (shadow_dirty_mask ? shadow_dirty_mask :
670 kvm_set_pfn_dirty(pfn);
 * Rules for using mmu_spte_clear_no_track:
 * Directly clear the spte without caring about the state bits of the
 * sptep; it is used for upper-level sptes.
679 static void mmu_spte_clear_no_track(u64 *sptep)
681 __update_clear_spte_fast(sptep, 0ull);
684 static u64 mmu_spte_get_lockless(u64 *sptep)
686 return __get_spte_lockless(sptep);
689 static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
692 * Prevent page table teardown by making any free-er wait during
693 * kvm_flush_remote_tlbs() IPI to all active vcpus.
 * Make sure a following spte read is not reordered ahead of the write
 * to vcpu->mode.
701 smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
704 static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
707 * Make sure the write to vcpu->mode is not reordered in front of
708 * reads to sptes. If it does, kvm_commit_zap_page() can see us
709 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
711 smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
715 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
716 struct kmem_cache *base_cache, int min)
720 if (cache->nobjs >= min)
722 while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
723 obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
726 cache->objects[cache->nobjs++] = obj;
731 static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *cache)
736 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
737 struct kmem_cache *cache)
740 kmem_cache_free(cache, mc->objects[--mc->nobjs]);
743 static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
748 if (cache->nobjs >= min)
750 while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
751 page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
754 cache->objects[cache->nobjs++] = page;
759 static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
762 free_page((unsigned long)mc->objects[--mc->nobjs]);
765 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
769 r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
770 pte_list_desc_cache, 8 + PTE_PREFETCH_NUM);
773 r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
776 r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
777 mmu_page_header_cache, 4);
782 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
784 mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
785 pte_list_desc_cache);
786 mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
787 mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache,
788 mmu_page_header_cache);
791 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
796 p = mc->objects[--mc->nobjs];
800 static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
802 return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
805 static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
807 kmem_cache_free(pte_list_desc_cache, pte_list_desc);
810 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
812 if (!sp->role.direct)
813 return sp->gfns[index];
815 return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
818 static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
820 if (!sp->role.direct) {
821 sp->gfns[index] = gfn;
825 if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index)))
826 pr_err_ratelimited("gfn mismatch under direct page %llx "
827 "(expected %llx, got %llx)\n",
829 kvm_mmu_page_get_gfn(sp, index), gfn);
833 * Return the pointer to the large page information for a given gfn,
834 * handling slots that are not large page aligned.
836 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
837 struct kvm_memory_slot *slot,
842 idx = gfn_to_index(gfn, slot->base_gfn, level);
843 return &slot->arch.lpage_info[level - 2][idx];
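/*
 * For example, assuming the usual gfn_to_index() definition of
 * (gfn >> KVM_HPAGE_GFN_SHIFT(level)) - (base_gfn >> KVM_HPAGE_GFN_SHIFT(level)),
 * level 2 yields the 2MB-granule index and level 3 the 1GB-granule index
 * relative to the start of the memslot; because base_gfn is shifted the
 * same way, a slot that does not start on a large-page boundary still
 * indexes from entry 0.
 */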
846 static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot,
847 gfn_t gfn, int count)
849 struct kvm_lpage_info *linfo;
852 for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
853 linfo = lpage_info_slot(gfn, slot, i);
854 linfo->disallow_lpage += count;
855 WARN_ON(linfo->disallow_lpage < 0);
859 void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
861 update_gfn_disallow_lpage_count(slot, gfn, 1);
864 void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
866 update_gfn_disallow_lpage_count(slot, gfn, -1);
869 static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
871 struct kvm_memslots *slots;
872 struct kvm_memory_slot *slot;
875 kvm->arch.indirect_shadow_pages++;
877 slots = kvm_memslots_for_spte_role(kvm, sp->role);
878 slot = __gfn_to_memslot(slots, gfn);
/* the non-leaf shadow pages are kept read-only (write-protected). */
881 if (sp->role.level > PT_PAGE_TABLE_LEVEL)
882 return kvm_slot_page_track_add_page(kvm, slot, gfn,
883 KVM_PAGE_TRACK_WRITE);
885 kvm_mmu_gfn_disallow_lpage(slot, gfn);
888 static void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
890 if (sp->lpage_disallowed)
893 ++kvm->stat.nx_lpage_splits;
894 list_add_tail(&sp->lpage_disallowed_link,
895 &kvm->arch.lpage_disallowed_mmu_pages);
896 sp->lpage_disallowed = true;
899 static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
901 struct kvm_memslots *slots;
902 struct kvm_memory_slot *slot;
905 kvm->arch.indirect_shadow_pages--;
907 slots = kvm_memslots_for_spte_role(kvm, sp->role);
908 slot = __gfn_to_memslot(slots, gfn);
909 if (sp->role.level > PT_PAGE_TABLE_LEVEL)
910 return kvm_slot_page_track_remove_page(kvm, slot, gfn,
911 KVM_PAGE_TRACK_WRITE);
913 kvm_mmu_gfn_allow_lpage(slot, gfn);
916 static void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
918 --kvm->stat.nx_lpage_splits;
919 sp->lpage_disallowed = false;
920 list_del(&sp->lpage_disallowed_link);
923 static bool __mmu_gfn_lpage_is_disallowed(gfn_t gfn, int level,
924 struct kvm_memory_slot *slot)
926 struct kvm_lpage_info *linfo;
929 linfo = lpage_info_slot(gfn, slot, level);
930 return !!linfo->disallow_lpage;
936 static bool mmu_gfn_lpage_is_disallowed(struct kvm_vcpu *vcpu, gfn_t gfn,
939 struct kvm_memory_slot *slot;
941 slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
942 return __mmu_gfn_lpage_is_disallowed(gfn, level, slot);
945 static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
947 unsigned long page_size;
950 page_size = kvm_host_page_size(kvm, gfn);
952 for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
953 if (page_size >= KVM_HPAGE_SIZE(i))
962 static inline bool memslot_valid_for_gpte(struct kvm_memory_slot *slot,
965 if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
967 if (no_dirty_log && slot->dirty_bitmap)
973 static struct kvm_memory_slot *
974 gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
977 struct kvm_memory_slot *slot;
979 slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
980 if (!memslot_valid_for_gpte(slot, no_dirty_log))
986 static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
987 bool *force_pt_level)
989 int host_level, level, max_level;
990 struct kvm_memory_slot *slot;
992 if (unlikely(*force_pt_level))
993 return PT_PAGE_TABLE_LEVEL;
995 slot = kvm_vcpu_gfn_to_memslot(vcpu, large_gfn);
996 *force_pt_level = !memslot_valid_for_gpte(slot, true);
997 if (unlikely(*force_pt_level))
998 return PT_PAGE_TABLE_LEVEL;
1000 host_level = host_mapping_level(vcpu->kvm, large_gfn);
1002 if (host_level == PT_PAGE_TABLE_LEVEL)
1005 max_level = min(kvm_x86_ops->get_lpage_level(), host_level);
1007 for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
1008 if (__mmu_gfn_lpage_is_disallowed(large_gfn, level, slot))
1015 * About rmap_head encoding:
1017 * If the bit zero of rmap_head->val is clear, then it points to the only spte
1018 * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
1019 * pte_list_desc containing more mappings.
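/*
 * Example of the encoding: with a single mapping, rmap_head->val is the
 * spte pointer itself and bit zero is clear (sptes are 8-byte aligned);
 * once a second mapping is added, val becomes (unsigned long)desc | 1 and
 * the sptes live in the pte_list_desc array, with further descriptors
 * chained through ->more.
 */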
1023 * Returns the number of pointers in the rmap chain, not counting the new one.
1025 static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
1026 struct kvm_rmap_head *rmap_head)
1028 struct pte_list_desc *desc;
1031 if (!rmap_head->val) {
1032 rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte);
1033 rmap_head->val = (unsigned long)spte;
1034 } else if (!(rmap_head->val & 1)) {
1035 rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte);
1036 desc = mmu_alloc_pte_list_desc(vcpu);
1037 desc->sptes[0] = (u64 *)rmap_head->val;
1038 desc->sptes[1] = spte;
1039 rmap_head->val = (unsigned long)desc | 1;
1042 rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
1043 desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1044 while (desc->sptes[PTE_LIST_EXT-1] && desc->more) {
1046 count += PTE_LIST_EXT;
1048 if (desc->sptes[PTE_LIST_EXT-1]) {
1049 desc->more = mmu_alloc_pte_list_desc(vcpu);
1052 for (i = 0; desc->sptes[i]; ++i)
1054 desc->sptes[i] = spte;
1060 pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
1061 struct pte_list_desc *desc, int i,
1062 struct pte_list_desc *prev_desc)
1066 for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j)
1068 desc->sptes[i] = desc->sptes[j];
1069 desc->sptes[j] = NULL;
1072 if (!prev_desc && !desc->more)
1073 rmap_head->val = (unsigned long)desc->sptes[0];
1076 prev_desc->more = desc->more;
1078 rmap_head->val = (unsigned long)desc->more | 1;
1079 mmu_free_pte_list_desc(desc);
1082 static void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
1084 struct pte_list_desc *desc;
1085 struct pte_list_desc *prev_desc;
1088 if (!rmap_head->val) {
1089 printk(KERN_ERR "pte_list_remove: %p 0->BUG\n", spte);
1091 } else if (!(rmap_head->val & 1)) {
1092 rmap_printk("pte_list_remove: %p 1->0\n", spte);
1093 if ((u64 *)rmap_head->val != spte) {
1094 printk(KERN_ERR "pte_list_remove: %p 1->BUG\n", spte);
1099 rmap_printk("pte_list_remove: %p many->many\n", spte);
1100 desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1103 for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
1104 if (desc->sptes[i] == spte) {
1105 pte_list_desc_remove_entry(rmap_head,
1106 desc, i, prev_desc);
1113 pr_err("pte_list_remove: %p many->many\n", spte);
1118 static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
1119 struct kvm_memory_slot *slot)
1123 idx = gfn_to_index(gfn, slot->base_gfn, level);
1124 return &slot->arch.rmap[level - PT_PAGE_TABLE_LEVEL][idx];
1127 static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
1128 struct kvm_mmu_page *sp)
1130 struct kvm_memslots *slots;
1131 struct kvm_memory_slot *slot;
1133 slots = kvm_memslots_for_spte_role(kvm, sp->role);
1134 slot = __gfn_to_memslot(slots, gfn);
1135 return __gfn_to_rmap(gfn, sp->role.level, slot);
1138 static bool rmap_can_add(struct kvm_vcpu *vcpu)
1140 struct kvm_mmu_memory_cache *cache;
1142 cache = &vcpu->arch.mmu_pte_list_desc_cache;
1143 return mmu_memory_cache_free_objects(cache);
1146 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
1148 struct kvm_mmu_page *sp;
1149 struct kvm_rmap_head *rmap_head;
1151 sp = page_header(__pa(spte));
1152 kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
1153 rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
1154 return pte_list_add(vcpu, spte, rmap_head);
1157 static void rmap_remove(struct kvm *kvm, u64 *spte)
1159 struct kvm_mmu_page *sp;
1161 struct kvm_rmap_head *rmap_head;
1163 sp = page_header(__pa(spte));
1164 gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
1165 rmap_head = gfn_to_rmap(kvm, gfn, sp);
1166 pte_list_remove(spte, rmap_head);
1170 * Used by the following functions to iterate through the sptes linked by a
1171 * rmap. All fields are private and not assumed to be used outside.
1173 struct rmap_iterator {
1174 /* private fields */
1175 struct pte_list_desc *desc; /* holds the sptep if not NULL */
1176 int pos; /* index of the sptep */
1180 * Iteration must be started by this function. This should also be used after
1181 * removing/dropping sptes from the rmap link because in such cases the
 * information in the iterator may not be valid.
1184 * Returns sptep if found, NULL otherwise.
1186 static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
1187 struct rmap_iterator *iter)
1191 if (!rmap_head->val)
1194 if (!(rmap_head->val & 1)) {
1196 sptep = (u64 *)rmap_head->val;
1200 iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1202 sptep = iter->desc->sptes[iter->pos];
1204 BUG_ON(!is_shadow_present_pte(*sptep));
1209 * Must be used with a valid iterator: e.g. after rmap_get_first().
1211 * Returns sptep if found, NULL otherwise.
1213 static u64 *rmap_get_next(struct rmap_iterator *iter)
1218 if (iter->pos < PTE_LIST_EXT - 1) {
1220 sptep = iter->desc->sptes[iter->pos];
1225 iter->desc = iter->desc->more;
1229 /* desc->sptes[0] cannot be NULL */
1230 sptep = iter->desc->sptes[iter->pos];
1237 BUG_ON(!is_shadow_present_pte(*sptep));
1241 #define for_each_rmap_spte(_rmap_head_, _iter_, _spte_) \
1242 for (_spte_ = rmap_get_first(_rmap_head_, _iter_); \
1243 _spte_; _spte_ = rmap_get_next(_iter_))
1245 static void drop_spte(struct kvm *kvm, u64 *sptep)
1247 if (mmu_spte_clear_track_bits(sptep))
1248 rmap_remove(kvm, sptep);
1252 static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
1254 if (is_large_pte(*sptep)) {
1255 WARN_ON(page_header(__pa(sptep))->role.level ==
1256 PT_PAGE_TABLE_LEVEL);
1257 drop_spte(kvm, sptep);
1265 static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
1267 if (__drop_large_spte(vcpu->kvm, sptep))
1268 kvm_flush_remote_tlbs(vcpu->kvm);
 * Write-protect the specified @sptep; @pt_protect indicates whether the
 * spte write-protection is caused by protecting the shadow page table.
 *
 * Note: write protection is different between dirty logging and spte
 * protection:
 * - for dirty logging, the spte can be set to writable at any time if
 *   its dirty bitmap is properly set.
 * - for spte protection, the spte can be made writable only after
 *   unsync-ing the shadow page.
 *
 * Return true if the TLB needs to be flushed.
1284 static bool spte_write_protect(u64 *sptep, bool pt_protect)
1288 if (!is_writable_pte(spte) &&
1289 !(pt_protect && spte_is_locklessly_modifiable(spte)))
1292 rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
1295 spte &= ~SPTE_MMU_WRITEABLE;
1296 spte = spte & ~PT_WRITABLE_MASK;
1298 return mmu_spte_update(sptep, spte);
1301 static bool __rmap_write_protect(struct kvm *kvm,
1302 struct kvm_rmap_head *rmap_head,
1306 struct rmap_iterator iter;
1309 for_each_rmap_spte(rmap_head, &iter, sptep)
1310 flush |= spte_write_protect(sptep, pt_protect);
1315 static bool spte_clear_dirty(u64 *sptep)
1319 rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep);
1321 spte &= ~shadow_dirty_mask;
1323 return mmu_spte_update(sptep, spte);
1326 static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
1329 struct rmap_iterator iter;
1332 for_each_rmap_spte(rmap_head, &iter, sptep)
1333 flush |= spte_clear_dirty(sptep);
1338 static bool spte_set_dirty(u64 *sptep)
1342 rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep);
1344 spte |= shadow_dirty_mask;
1346 return mmu_spte_update(sptep, spte);
1349 static bool __rmap_set_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
1352 struct rmap_iterator iter;
1355 for_each_rmap_spte(rmap_head, &iter, sptep)
1356 flush |= spte_set_dirty(sptep);
1362 * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
1363 * @kvm: kvm instance
1364 * @slot: slot to protect
1365 * @gfn_offset: start of the BITS_PER_LONG pages we care about
1366 * @mask: indicates which pages we should protect
1368 * Used when we do not need to care about huge page mappings: e.g. during dirty
1369 * logging we do not have any such mappings.
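/*
 * For example, a call with mask == 0x5 write-protects exactly two 4k
 * mappings, those of gfn slot->base_gfn + gfn_offset and
 * slot->base_gfn + gfn_offset + 2: each set bit in @mask selects one page.
 */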
1371 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1372 struct kvm_memory_slot *slot,
1373 gfn_t gfn_offset, unsigned long mask)
1375 struct kvm_rmap_head *rmap_head;
1378 rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1379 PT_PAGE_TABLE_LEVEL, slot);
1380 __rmap_write_protect(kvm, rmap_head, false);
1382 /* clear the first set bit */
1388 * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages
1389 * @kvm: kvm instance
1390 * @slot: slot to clear D-bit
1391 * @gfn_offset: start of the BITS_PER_LONG pages we care about
1392 * @mask: indicates which pages we should clear D-bit
1394 * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
1396 void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1397 struct kvm_memory_slot *slot,
1398 gfn_t gfn_offset, unsigned long mask)
1400 struct kvm_rmap_head *rmap_head;
1403 rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1404 PT_PAGE_TABLE_LEVEL, slot);
1405 __rmap_clear_dirty(kvm, rmap_head);
1407 /* clear the first set bit */
1411 EXPORT_SYMBOL_GPL(kvm_mmu_clear_dirty_pt_masked);
1414 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
1417 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1418 * enable dirty logging for them.
1420 * Used when we do not need to care about huge page mappings: e.g. during dirty
1421 * logging we do not have any such mappings.
1423 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1424 struct kvm_memory_slot *slot,
1425 gfn_t gfn_offset, unsigned long mask)
1427 if (kvm_x86_ops->enable_log_dirty_pt_masked)
1428 kvm_x86_ops->enable_log_dirty_pt_masked(kvm, slot, gfn_offset,
1431 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1434 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
1435 struct kvm_memory_slot *slot, u64 gfn)
1437 struct kvm_rmap_head *rmap_head;
1439 bool write_protected = false;
1441 for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
1442 rmap_head = __gfn_to_rmap(gfn, i, slot);
1443 write_protected |= __rmap_write_protect(kvm, rmap_head, true);
1446 return write_protected;
1449 static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
1451 struct kvm_memory_slot *slot;
1453 slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1454 return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
1457 static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
1460 struct rmap_iterator iter;
1463 while ((sptep = rmap_get_first(rmap_head, &iter))) {
1464 rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep);
1466 drop_spte(kvm, sptep);
1473 static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1474 struct kvm_memory_slot *slot, gfn_t gfn, int level,
1477 return kvm_zap_rmapp(kvm, rmap_head);
1480 static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1481 struct kvm_memory_slot *slot, gfn_t gfn, int level,
1485 struct rmap_iterator iter;
1488 pte_t *ptep = (pte_t *)data;
1491 WARN_ON(pte_huge(*ptep));
1492 new_pfn = pte_pfn(*ptep);
1495 for_each_rmap_spte(rmap_head, &iter, sptep) {
1496 rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n",
1497 sptep, *sptep, gfn, level);
1501 if (pte_write(*ptep)) {
1502 drop_spte(kvm, sptep);
1505 new_spte = *sptep & ~PT64_BASE_ADDR_MASK;
1506 new_spte |= (u64)new_pfn << PAGE_SHIFT;
1508 new_spte &= ~PT_WRITABLE_MASK;
1509 new_spte &= ~SPTE_HOST_WRITEABLE;
1510 new_spte &= ~shadow_accessed_mask;
1512 mmu_spte_clear_track_bits(sptep);
1513 mmu_spte_set(sptep, new_spte);
1518 kvm_flush_remote_tlbs(kvm);
1523 struct slot_rmap_walk_iterator {
1525 struct kvm_memory_slot *slot;
1531 /* output fields. */
1533 struct kvm_rmap_head *rmap;
1536 /* private field. */
1537 struct kvm_rmap_head *end_rmap;
1541 rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
1543 iterator->level = level;
1544 iterator->gfn = iterator->start_gfn;
1545 iterator->rmap = __gfn_to_rmap(iterator->gfn, level, iterator->slot);
1546 iterator->end_rmap = __gfn_to_rmap(iterator->end_gfn, level,
1551 slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
1552 struct kvm_memory_slot *slot, int start_level,
1553 int end_level, gfn_t start_gfn, gfn_t end_gfn)
1555 iterator->slot = slot;
1556 iterator->start_level = start_level;
1557 iterator->end_level = end_level;
1558 iterator->start_gfn = start_gfn;
1559 iterator->end_gfn = end_gfn;
1561 rmap_walk_init_level(iterator, iterator->start_level);
1564 static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
1566 return !!iterator->rmap;
1569 static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
1571 if (++iterator->rmap <= iterator->end_rmap) {
1572 iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
1576 if (++iterator->level > iterator->end_level) {
1577 iterator->rmap = NULL;
1581 rmap_walk_init_level(iterator, iterator->level);
1584 #define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_, \
1585 _start_gfn, _end_gfn, _iter_) \
1586 for (slot_rmap_walk_init(_iter_, _slot_, _start_level_, \
1587 _end_level_, _start_gfn, _end_gfn); \
1588 slot_rmap_walk_okay(_iter_); \
1589 slot_rmap_walk_next(_iter_))
1591 static int kvm_handle_hva_range(struct kvm *kvm,
1592 unsigned long start,
1595 int (*handler)(struct kvm *kvm,
1596 struct kvm_rmap_head *rmap_head,
1597 struct kvm_memory_slot *slot,
1600 unsigned long data))
1602 struct kvm_memslots *slots;
1603 struct kvm_memory_slot *memslot;
1604 struct slot_rmap_walk_iterator iterator;
1608 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
1609 slots = __kvm_memslots(kvm, i);
1610 kvm_for_each_memslot(memslot, slots) {
1611 unsigned long hva_start, hva_end;
1612 gfn_t gfn_start, gfn_end;
1614 hva_start = max(start, memslot->userspace_addr);
1615 hva_end = min(end, memslot->userspace_addr +
1616 (memslot->npages << PAGE_SHIFT));
1617 if (hva_start >= hva_end)
1620 * {gfn(page) | page intersects with [hva_start, hva_end)} =
1621 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
1623 gfn_start = hva_to_gfn_memslot(hva_start, memslot);
1624 gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
1626 for_each_slot_rmap_range(memslot, PT_PAGE_TABLE_LEVEL,
1627 PT_MAX_HUGEPAGE_LEVEL,
1628 gfn_start, gfn_end - 1,
1630 ret |= handler(kvm, iterator.rmap, memslot,
1631 iterator.gfn, iterator.level, data);
1638 static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
1640 int (*handler)(struct kvm *kvm,
1641 struct kvm_rmap_head *rmap_head,
1642 struct kvm_memory_slot *slot,
1643 gfn_t gfn, int level,
1644 unsigned long data))
1646 return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
1649 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
1651 return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
1654 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
1656 return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
1659 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
1661 kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
1664 static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1665 struct kvm_memory_slot *slot, gfn_t gfn, int level,
1669 struct rmap_iterator uninitialized_var(iter);
1672 BUG_ON(!shadow_accessed_mask);
1674 for_each_rmap_spte(rmap_head, &iter, sptep) {
1675 if (*sptep & shadow_accessed_mask) {
1677 clear_bit((ffs(shadow_accessed_mask) - 1),
1678 (unsigned long *)sptep);
1682 trace_kvm_age_page(gfn, level, slot, young);
1686 static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1687 struct kvm_memory_slot *slot, gfn_t gfn,
1688 int level, unsigned long data)
1691 struct rmap_iterator iter;
 * If there's no access bit in the secondary pte set by the
 * hardware, it's up to gup-fast/gup to set the access bit in
1697 * the primary pte or in the page structure.
1699 if (!shadow_accessed_mask)
1702 for_each_rmap_spte(rmap_head, &iter, sptep) {
1703 if (*sptep & shadow_accessed_mask) {
1712 #define RMAP_RECYCLE_THRESHOLD 1000
1714 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
1716 struct kvm_rmap_head *rmap_head;
1717 struct kvm_mmu_page *sp;
1719 sp = page_header(__pa(spte));
1721 rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
1723 kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0);
1724 kvm_flush_remote_tlbs(vcpu->kvm);
1727 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
 * In the absence of EPT Access and Dirty bit support, emulate the
 * accessed bit for EPT by checking if this page has an EPT mapping,
 * and clearing it if it does. On the next access,
1733 * a new EPT mapping will be established.
1734 * This has some overhead, but not as much as the cost of swapping
1735 * out actively used pages or breaking up actively used hugepages.
1737 if (!shadow_accessed_mask) {
1739 * We are holding the kvm->mmu_lock, and we are blowing up
1740 * shadow PTEs. MMU notifier consumers need to be kept at bay.
1741 * This is correct as long as we don't decouple the mmu_lock
1742 * protected regions (like invalidate_range_start|end does).
1744 kvm->mmu_notifier_seq++;
1745 return kvm_handle_hva_range(kvm, start, end, 0,
1749 return kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
1752 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
1754 return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);
1758 static int is_empty_shadow_page(u64 *spt)
1763 for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
1764 if (is_shadow_present_pte(*pos)) {
1765 printk(KERN_ERR "%s: %p %llx\n", __func__,
 * This value is the sum of all of the kvm instances'
 * kvm->arch.n_used_mmu_pages values. We need a global,
 * aggregate version in order to make the slab shrinker
 * faster.
1779 static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
1781 kvm->arch.n_used_mmu_pages += nr;
1782 percpu_counter_add(&kvm_total_used_mmu_pages, nr);
1785 static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
1787 MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
1788 hlist_del(&sp->hash_link);
1789 list_del(&sp->link);
1790 free_page((unsigned long)sp->spt);
1791 if (!sp->role.direct)
1792 free_page((unsigned long)sp->gfns);
1793 kmem_cache_free(mmu_page_header_cache, sp);
1796 static unsigned kvm_page_table_hashfn(gfn_t gfn)
1798 return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
1801 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
1802 struct kvm_mmu_page *sp, u64 *parent_pte)
1807 pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
1810 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
1813 pte_list_remove(parent_pte, &sp->parent_ptes);
1816 static void drop_parent_pte(struct kvm_mmu_page *sp,
1819 mmu_page_remove_parent_pte(sp, parent_pte);
1820 mmu_spte_clear_no_track(parent_pte);
1823 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
1825 struct kvm_mmu_page *sp;
1827 sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
1828 sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
1830 sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
1831 set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 * The active_mmu_pages list is a FIFO list; do not move the
1835 * page until it is zapped. kvm_zap_obsolete_pages depends on
1836 * this feature. See the comments in kvm_zap_obsolete_pages().
1838 list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
1839 kvm_mod_used_mmu_pages(vcpu->kvm, +1);
1843 static void mark_unsync(u64 *spte);
1844 static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
1847 struct rmap_iterator iter;
1849 for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
1854 static void mark_unsync(u64 *spte)
1856 struct kvm_mmu_page *sp;
1859 sp = page_header(__pa(spte));
1860 index = spte - sp->spt;
1861 if (__test_and_set_bit(index, sp->unsync_child_bitmap))
1863 if (sp->unsync_children++)
1865 kvm_mmu_mark_parents_unsync(sp);
1868 static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
1869 struct kvm_mmu_page *sp)
1874 static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
1878 static void nonpaging_update_pte(struct kvm_vcpu *vcpu,
1879 struct kvm_mmu_page *sp, u64 *spte,
1885 #define KVM_PAGE_ARRAY_NR 16
1887 struct kvm_mmu_pages {
1888 struct mmu_page_and_offset {
1889 struct kvm_mmu_page *sp;
1891 } page[KVM_PAGE_ARRAY_NR];
1895 static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
1901 for (i=0; i < pvec->nr; i++)
1902 if (pvec->page[i].sp == sp)
1905 pvec->page[pvec->nr].sp = sp;
1906 pvec->page[pvec->nr].idx = idx;
1908 return (pvec->nr == KVM_PAGE_ARRAY_NR);
1911 static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
1913 --sp->unsync_children;
1914 WARN_ON((int)sp->unsync_children < 0);
1915 __clear_bit(idx, sp->unsync_child_bitmap);
1918 static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
1919 struct kvm_mmu_pages *pvec)
1921 int i, ret, nr_unsync_leaf = 0;
1923 for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
1924 struct kvm_mmu_page *child;
1925 u64 ent = sp->spt[i];
1927 if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
1928 clear_unsync_child_bit(sp, i);
1932 child = page_header(ent & PT64_BASE_ADDR_MASK);
1934 if (child->unsync_children) {
1935 if (mmu_pages_add(pvec, child, i))
1938 ret = __mmu_unsync_walk(child, pvec);
1940 clear_unsync_child_bit(sp, i);
1942 } else if (ret > 0) {
1943 nr_unsync_leaf += ret;
1946 } else if (child->unsync) {
1948 if (mmu_pages_add(pvec, child, i))
1951 clear_unsync_child_bit(sp, i);
1954 return nr_unsync_leaf;
1957 #define INVALID_INDEX (-1)
1959 static int mmu_unsync_walk(struct kvm_mmu_page *sp,
1960 struct kvm_mmu_pages *pvec)
1963 if (!sp->unsync_children)
1966 mmu_pages_add(pvec, sp, INVALID_INDEX);
1967 return __mmu_unsync_walk(sp, pvec);
1970 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1972 WARN_ON(!sp->unsync);
1973 trace_kvm_mmu_sync_page(sp);
1975 --kvm->stat.mmu_unsync;
1978 static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1979 struct list_head *invalid_list);
1980 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1981 struct list_head *invalid_list);
 * NOTE: pay extra attention to zapped-obsolete pages
 * (is_obsolete_sp(sp) && sp->role.invalid) when doing a hash list walk,
 * since they have been deleted from active_mmu_pages but can still be
 * found in the hash list.
 *
 * for_each_gfn_valid_sp() skips that kind of page.
1991 #define for_each_gfn_valid_sp(_kvm, _sp, _gfn) \
1992 hlist_for_each_entry(_sp, \
1993 &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
1994 if ((_sp)->gfn != (_gfn) || is_obsolete_sp((_kvm), (_sp)) \
1995 || (_sp)->role.invalid) {} else
1997 #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn) \
1998 for_each_gfn_valid_sp(_kvm, _sp, _gfn) \
1999 if ((_sp)->role.direct) {} else
2001 /* @sp->gfn should be write-protected at the call site */
2002 static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
2003 struct list_head *invalid_list)
2005 if (sp->role.cr4_pae != !!is_pae(vcpu)) {
2006 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
2010 if (vcpu->arch.mmu.sync_page(vcpu, sp) == 0) {
2011 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
2018 static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
2019 struct list_head *invalid_list,
2020 bool remote_flush, bool local_flush)
2022 if (!list_empty(invalid_list)) {
2023 kvm_mmu_commit_zap_page(vcpu->kvm, invalid_list);
2028 kvm_flush_remote_tlbs(vcpu->kvm);
2029 else if (local_flush)
2030 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2033 #ifdef CONFIG_KVM_MMU_AUDIT
2034 #include "mmu_audit.c"
2036 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
2037 static void mmu_audit_disable(void) { }
2040 static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
2042 return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
2045 static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
2046 struct list_head *invalid_list)
2048 kvm_unlink_unsync_page(vcpu->kvm, sp);
2049 return __kvm_sync_page(vcpu, sp, invalid_list);
2052 /* @gfn should be write-protected at the call site */
2053 static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
2054 struct list_head *invalid_list)
2056 struct kvm_mmu_page *s;
2059 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
2063 WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
2064 ret |= kvm_sync_page(vcpu, s, invalid_list);
2070 struct mmu_page_path {
2071 struct kvm_mmu_page *parent[PT64_ROOT_LEVEL];
2072 unsigned int idx[PT64_ROOT_LEVEL];
2075 #define for_each_sp(pvec, sp, parents, i) \
2076 for (i = mmu_pages_first(&pvec, &parents); \
2077 i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \
2078 i = mmu_pages_next(&pvec, &parents, i))
2080 static int mmu_pages_next(struct kvm_mmu_pages *pvec,
2081 struct mmu_page_path *parents,
2086 for (n = i+1; n < pvec->nr; n++) {
2087 struct kvm_mmu_page *sp = pvec->page[n].sp;
2088 unsigned idx = pvec->page[n].idx;
2089 int level = sp->role.level;
2091 parents->idx[level-1] = idx;
2092 if (level == PT_PAGE_TABLE_LEVEL)
2095 parents->parent[level-2] = sp;
2101 static int mmu_pages_first(struct kvm_mmu_pages *pvec,
2102 struct mmu_page_path *parents)
2104 struct kvm_mmu_page *sp;
2110 WARN_ON(pvec->page[0].idx != INVALID_INDEX);
2112 sp = pvec->page[0].sp;
2113 level = sp->role.level;
2114 WARN_ON(level == PT_PAGE_TABLE_LEVEL);
2116 parents->parent[level-2] = sp;
2118 /* Also set up a sentinel. Further entries in pvec are all
2119 * children of sp, so this element is never overwritten.
2121 parents->parent[level-1] = NULL;
2122 return mmu_pages_next(pvec, parents, 0);
2125 static void mmu_pages_clear_parents(struct mmu_page_path *parents)
2127 struct kvm_mmu_page *sp;
2128 unsigned int level = 0;
2131 unsigned int idx = parents->idx[level];
2132 sp = parents->parent[level];
2136 WARN_ON(idx == INVALID_INDEX);
2137 clear_unsync_child_bit(sp, idx);
2139 } while (!sp->unsync_children);
2142 static void mmu_sync_children(struct kvm_vcpu *vcpu,
2143 struct kvm_mmu_page *parent)
2146 struct kvm_mmu_page *sp;
2147 struct mmu_page_path parents;
2148 struct kvm_mmu_pages pages;
2149 LIST_HEAD(invalid_list);
2152 while (mmu_unsync_walk(parent, &pages)) {
2153 bool protected = false;
2155 for_each_sp(pages, sp, parents, i)
2156 protected |= rmap_write_protect(vcpu, sp->gfn);
2159 kvm_flush_remote_tlbs(vcpu->kvm);
2163 for_each_sp(pages, sp, parents, i) {
2164 flush |= kvm_sync_page(vcpu, sp, &invalid_list);
2165 mmu_pages_clear_parents(&parents);
2167 if (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)) {
2168 kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
2169 cond_resched_lock(&vcpu->kvm->mmu_lock);
2174 kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
2177 static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
2179 atomic_set(&sp->write_flooding_count, 0);
2182 static void clear_sp_write_flooding_count(u64 *spte)
2184 struct kvm_mmu_page *sp = page_header(__pa(spte));
2186 __clear_sp_write_flooding_count(sp);
2189 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
2196 union kvm_mmu_page_role role;
2198 struct kvm_mmu_page *sp;
2199 bool need_sync = false;
2201 LIST_HEAD(invalid_list);
2203 role = vcpu->arch.mmu.base_role;
2205 role.direct = direct;
2208 role.access = access;
2209 if (!vcpu->arch.mmu.direct_map
2210 && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
2211 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
2212 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
2213 role.quadrant = quadrant;
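/*
 * Illustrative note: a non-PAE 32-bit guest page table has 1024 4-byte
 * entries, but a shadow page holds only 512 8-byte sptes, so a single
 * guest table is shadowed by several shadow pages; role.quadrant records
 * which half (or, for the 2-level root, which quarter) of the guest table
 * this shadow page covers.
 */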
2215 for_each_gfn_valid_sp(vcpu->kvm, sp, gfn) {
2216 if (!need_sync && sp->unsync)
2219 if (sp->role.word != role.word)
2223 /* The page is good, but __kvm_sync_page might still end
2224 * up zapping it. If so, break in order to rebuild it.
2226 if (!__kvm_sync_page(vcpu, sp, &invalid_list))
2229 WARN_ON(!list_empty(&invalid_list));
2230 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2233 if (sp->unsync_children)
2234 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
2236 __clear_sp_write_flooding_count(sp);
2237 trace_kvm_mmu_get_page(sp, false);
2241 ++vcpu->kvm->stat.mmu_cache_miss;
2243 sp = kvm_mmu_alloc_page(vcpu, direct);
2247 hlist_add_head(&sp->hash_link,
2248 &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
 * we should do write protection before syncing pages,
 * otherwise the content of the synced shadow page may
 * be inconsistent with the guest page table.
2255 account_shadowed(vcpu->kvm, sp);
2256 if (level == PT_PAGE_TABLE_LEVEL &&
2257 rmap_write_protect(vcpu, gfn))
2258 kvm_flush_remote_tlbs(vcpu->kvm);
2260 if (level > PT_PAGE_TABLE_LEVEL && need_sync)
2261 flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
2263 sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
2264 clear_page(sp->spt);
2265 trace_kvm_mmu_get_page(sp, true);
2267 kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
2271 static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
2272 struct kvm_vcpu *vcpu, u64 addr)
2274 iterator->addr = addr;
2275 iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
2276 iterator->level = vcpu->arch.mmu.shadow_root_level;
2278 if (iterator->level == PT64_ROOT_LEVEL &&
2279 vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL &&
2280 !vcpu->arch.mmu.direct_map)
2283 if (iterator->level == PT32E_ROOT_LEVEL) {
2284 iterator->shadow_addr
2285 = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
2286 iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
2288 if (!iterator->shadow_addr)
2289 iterator->level = 0;
2293 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
2295 if (iterator->level < PT_PAGE_TABLE_LEVEL)
2298 iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
2299 iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
2303 static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
2306 if (is_last_spte(spte, iterator->level)) {
2307 iterator->level = 0;
2311 iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
2315 static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
2317 return __shadow_walk_next(iterator, *iterator->sptep);
2320 static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
2321 struct kvm_mmu_page *sp)
2325 BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
2327 spte = __pa(sp->spt) | shadow_present_mask | PT_WRITABLE_MASK |
2328 shadow_user_mask | shadow_x_mask | shadow_accessed_mask;
2330 mmu_spte_set(sptep, spte);
2332 mmu_page_add_parent_pte(vcpu, sp, sptep);
2334 if (sp->unsync_children || sp->unsync)
2338 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2339 unsigned direct_access)
2341 if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
2342 struct kvm_mmu_page *child;
 * For a direct sp, if the guest pte's dirty bit
 * changed from clean to dirty, it would corrupt the
 * sp's access: it would allow writes into the read-only sp,
 * so we should update the spte at this point to get
 * a new sp with the correct access.
2351 child = page_header(*sptep & PT64_BASE_ADDR_MASK);
2352 if (child->role.access == direct_access)
2355 drop_parent_pte(child, sptep);
2356 kvm_flush_remote_tlbs(vcpu->kvm);
2360 static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
2364 struct kvm_mmu_page *child;
2367 if (is_shadow_present_pte(pte)) {
2368 if (is_last_spte(pte, sp->role.level)) {
2369 drop_spte(kvm, spte);
2370 if (is_large_pte(pte))
2373 child = page_header(pte & PT64_BASE_ADDR_MASK);
2374 drop_parent_pte(child, spte);
2379 if (is_mmio_spte(pte))
2380 mmu_spte_clear_no_track(spte);
2385 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
2386 struct kvm_mmu_page *sp)
2390 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
2391 mmu_page_zap_pte(kvm, sp, sp->spt + i);
2394 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
2397 struct rmap_iterator iter;
2399 while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
2400 drop_parent_pte(sp, sptep);
2403 static int mmu_zap_unsync_children(struct kvm *kvm,
2404 struct kvm_mmu_page *parent,
2405 struct list_head *invalid_list)
2408 struct mmu_page_path parents;
2409 struct kvm_mmu_pages pages;
2411 if (parent->role.level == PT_PAGE_TABLE_LEVEL)
2414 while (mmu_unsync_walk(parent, &pages)) {
2415 struct kvm_mmu_page *sp;
2417 for_each_sp(pages, sp, parents, i) {
2418 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
2419 mmu_pages_clear_parents(&parents);
2427 static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2428 struct list_head *invalid_list)
2432 trace_kvm_mmu_prepare_zap_page(sp);
2433 ++kvm->stat.mmu_shadow_zapped;
2434 ret = mmu_zap_unsync_children(kvm, sp, invalid_list);
2435 kvm_mmu_page_unlink_children(kvm, sp);
2436 kvm_mmu_unlink_parents(kvm, sp);
2438 if (!sp->role.invalid && !sp->role.direct)
2439 unaccount_shadowed(kvm, sp);
2442 kvm_unlink_unsync_page(kvm, sp);
2443 if (!sp->root_count) {
2446 list_move(&sp->link, invalid_list);
2447 kvm_mod_used_mmu_pages(kvm, -1);
2449 list_move(&sp->link, &kvm->arch.active_mmu_pages);
2452 * The obsolete pages cannot be used by any vcpu.
2453 * See the comments in kvm_mmu_invalidate_zap_all_pages().
2455 if (!sp->role.invalid && !is_obsolete_sp(kvm, sp))
2456 kvm_reload_remote_mmus(kvm);
2459 if (sp->lpage_disallowed)
2460 unaccount_huge_nx_page(kvm, sp);
2462 sp->role.invalid = 1;
2466 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
2467 struct list_head *invalid_list)
2469 struct kvm_mmu_page *sp, *nsp;
2471 if (list_empty(invalid_list))
2475 * We need to make sure everyone sees our modifications to
2476 * the page tables and sees changes to vcpu->mode here. The barrier
2477 * in the kvm_flush_remote_tlbs() achieves this. This pairs
2478 * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end.
2480 * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
2481 * guest mode and/or lockless shadow page table walks.
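 *
 * Pages were moved onto invalid_list by kvm_mmu_prepare_zap_page() while
 * holding mmu_lock; only after the flush below completes are they actually
 * freed, so no vcpu can still be walking them.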
2483 kvm_flush_remote_tlbs(kvm);
2485 list_for_each_entry_safe(sp, nsp, invalid_list, link) {
2486 WARN_ON(!sp->role.invalid || sp->root_count);
2487 kvm_mmu_free_page(sp);
2491 static bool prepare_zap_oldest_mmu_page(struct kvm *kvm,
2492 struct list_head *invalid_list)
2494 struct kvm_mmu_page *sp;
2496 if (list_empty(&kvm->arch.active_mmu_pages))
2499 sp = list_last_entry(&kvm->arch.active_mmu_pages,
2500 struct kvm_mmu_page, link);
2501 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
2507 * Changing the number of mmu pages allocated to the VM.
2508 * Note: if goal_nr_mmu_pages is too small, you will get a deadlock.
2510 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
2512 LIST_HEAD(invalid_list);
2514 spin_lock(&kvm->mmu_lock);
2516 if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
2517 /* Need to free some mmu pages to achieve the goal. */
2518 while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages)
2519 if (!prepare_zap_oldest_mmu_page(kvm, &invalid_list))
2522 kvm_mmu_commit_zap_page(kvm, &invalid_list);
2523 goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
2526 kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
2528 spin_unlock(&kvm->mmu_lock);
2531 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
2533 struct kvm_mmu_page *sp;
2534 LIST_HEAD(invalid_list);
2537 pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
2539 spin_lock(&kvm->mmu_lock);
2540 for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
2541 pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
2544 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
2546 kvm_mmu_commit_zap_page(kvm, &invalid_list);
2547 spin_unlock(&kvm->mmu_lock);
2551 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);
2553 static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
2555 trace_kvm_mmu_unsync_page(sp);
2556 ++vcpu->kvm->stat.mmu_unsync;
2559 kvm_mmu_mark_parents_unsync(sp);
2562 static bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
2565 struct kvm_mmu_page *sp;
2567 if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
2570 for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
2577 WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL);
2578 kvm_unsync_page(vcpu, sp);
2584 static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
2587 return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
2592 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2593 unsigned pte_access, int level,
2594 gfn_t gfn, kvm_pfn_t pfn, bool speculative,
2595 bool can_unsync, bool host_writable)
2600 if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
2604 * For the EPT case, shadow_present_mask is 0 if hardware
2605 * supports exec-only page table entries. In that case,
2606 * ACC_USER_MASK and shadow_user_mask are used to represent
2607 * read access. See FNAME(gpte_access) in paging_tmpl.h.
2609 spte |= shadow_present_mask;
2611 spte |= shadow_accessed_mask;
2613 if (level > PT_PAGE_TABLE_LEVEL && (pte_access & ACC_EXEC_MASK) &&
2614 is_nx_huge_page_enabled()) {
2615 pte_access &= ~ACC_EXEC_MASK;
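		/*
		 * Stripping ACC_EXEC_MASK here means an executable huge
		 * mapping is never created while the NX huge page mitigation
		 * is on; a later instruction fetch will fault with
		 * lpage_disallowed set (see nonpaging_map()/tdp_page_fault())
		 * and be mapped with a 4K spte instead.
		 */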
2618 if (pte_access & ACC_EXEC_MASK)
2619 spte |= shadow_x_mask;
2621 spte |= shadow_nx_mask;
2623 if (pte_access & ACC_USER_MASK)
2624 spte |= shadow_user_mask;
2626 if (level > PT_PAGE_TABLE_LEVEL)
2627 spte |= PT_PAGE_SIZE_MASK;
2629 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
2630 kvm_is_mmio_pfn(pfn));
2633 spte |= SPTE_HOST_WRITEABLE;
2635 pte_access &= ~ACC_WRITE_MASK;
2637 spte |= (u64)pfn << PAGE_SHIFT;
2639 if (pte_access & ACC_WRITE_MASK) {
2642 * Another vcpu may create a new sp in the window between
2643 * mapping_level() and acquiring mmu-lock. We can
2644 * allow the guest to retry the access; the mapping can
2645 * be fixed if the guest refaults.
2647 if (level > PT_PAGE_TABLE_LEVEL &&
2648 mmu_gfn_lpage_is_disallowed(vcpu, gfn, level))
2651 spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;
2654 * Optimization: for pte sync, if the spte was writable the hash
2655 * lookup is unnecessary (and expensive). Write protection
2656 * is the responsibility of mmu_get_page / kvm_sync_page.
2657 * Same reasoning can be applied to dirty page accounting.
2659 if (!can_unsync && is_writable_pte(*sptep))
2662 if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
2663 pgprintk("%s: found shadow page for %llx, marking ro\n",
2666 pte_access &= ~ACC_WRITE_MASK;
2667 spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
2671 if (pte_access & ACC_WRITE_MASK) {
2672 kvm_vcpu_mark_page_dirty(vcpu, gfn);
2673 spte |= shadow_dirty_mask;
2677 if (mmu_spte_update(sptep, spte))
2678 kvm_flush_remote_tlbs(vcpu->kvm);
2683 static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
2684 int write_fault, int level, gfn_t gfn, kvm_pfn_t pfn,
2685 bool speculative, bool host_writable)
2687 int was_rmapped = 0;
2689 int ret = RET_PF_RETRY;
2691 pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
2692 *sptep, write_fault, gfn);
2694 if (is_shadow_present_pte(*sptep)) {
2696 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
2697 * the parent of the now unreachable PTE.
2699 if (level > PT_PAGE_TABLE_LEVEL &&
2700 !is_large_pte(*sptep)) {
2701 struct kvm_mmu_page *child;
2704 child = page_header(pte & PT64_BASE_ADDR_MASK);
2705 drop_parent_pte(child, sptep);
2706 kvm_flush_remote_tlbs(vcpu->kvm);
2707 } else if (pfn != spte_to_pfn(*sptep)) {
2708 pgprintk("hfn old %llx new %llx\n",
2709 spte_to_pfn(*sptep), pfn);
2710 drop_spte(vcpu->kvm, sptep);
2711 kvm_flush_remote_tlbs(vcpu->kvm);
2716 if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative,
2717 true, host_writable)) {
2719 ret = RET_PF_EMULATE;
2720 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2723 if (unlikely(is_mmio_spte(*sptep)))
2724 ret = RET_PF_EMULATE;
2726 pgprintk("%s: setting spte %llx\n", __func__, *sptep);
2727 trace_kvm_mmu_set_spte(level, gfn, sptep);
2728 if (!was_rmapped && is_large_pte(*sptep))
2729 ++vcpu->kvm->stat.lpages;
2731 if (is_shadow_present_pte(*sptep)) {
2733 rmap_count = rmap_add(vcpu, sptep, gfn);
2734 if (rmap_count > RMAP_RECYCLE_THRESHOLD)
2735 rmap_recycle(vcpu, sptep, gfn);
2742 static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
2745 struct kvm_memory_slot *slot;
2747 slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
2749 return KVM_PFN_ERR_FAULT;
2751 return gfn_to_pfn_memslot_atomic(slot, gfn);
2754 static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
2755 struct kvm_mmu_page *sp,
2756 u64 *start, u64 *end)
2758 struct page *pages[PTE_PREFETCH_NUM];
2759 struct kvm_memory_slot *slot;
2760 unsigned access = sp->role.access;
2764 gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
2765 slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
2769 ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
2773 for (i = 0; i < ret; i++, gfn++, start++) {
2774 mmu_set_spte(vcpu, start, access, 0, sp->role.level, gfn,
2775 page_to_pfn(pages[i]), true, true);
2782 static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
2783 struct kvm_mmu_page *sp, u64 *sptep)
2785 u64 *spte, *start = NULL;
2788 WARN_ON(!sp->role.direct);
2790 i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
2793 for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
2794 if (is_shadow_present_pte(*spte) || spte == sptep) {
2797 if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
2805 static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
2807 struct kvm_mmu_page *sp;
2810 * Since there is no accessed bit on EPT, there is no way to
2811 * distinguish between actually accessed translations
2812 * and prefetched ones, so disable pte prefetch when EPT is enabled.
2815 if (!shadow_accessed_mask)
2818 sp = page_header(__pa(sptep));
2819 if (sp->role.level > PT_PAGE_TABLE_LEVEL)
2822 __direct_pte_prefetch(vcpu, sp, sptep);
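/*
 * Prefetching works on the naturally aligned group of PTE_PREFETCH_NUM (8)
 * sptes containing the faulting entry; runs of not-yet-present entries are
 * filled in via direct_pte_prefetch_many(), while already-present entries
 * and the faulting spte itself delimit those runs.
 */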
2825 static void disallowed_hugepage_adjust(struct kvm_shadow_walk_iterator it,
2826 gfn_t gfn, kvm_pfn_t *pfnp, int *levelp)
2828 int level = *levelp;
2829 u64 spte = *it.sptep;
2831 if (it.level == level && level > PT_PAGE_TABLE_LEVEL &&
2832 is_nx_huge_page_enabled() &&
2833 is_shadow_present_pte(spte) &&
2834 !is_large_pte(spte)) {
2836 * A small SPTE exists for this pfn, but FNAME(fetch)
2837 * and __direct_map would like to create a large PTE
2838 * instead: just force them to go down another level,
2839 * patching the next 9 bits of the address back into pfn.
2842 u64 page_mask = KVM_PAGES_PER_HPAGE(level) - KVM_PAGES_PER_HPAGE(level - 1);
2843 *pfnp |= gfn & page_mask;
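		/*
		 * For level == PT_DIRECTORY_LEVEL, page_mask is
		 * KVM_PAGES_PER_HPAGE(2) - KVM_PAGES_PER_HPAGE(1) = 0x1ff,
		 * i.e. the 9 gfn bits that select a 4K page within the 2MB
		 * region; OR-ing them into pfn keeps gfn and pfn congruent
		 * at the lower level.
		 */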
2848 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
2849 int map_writable, int level, kvm_pfn_t pfn,
2850 bool prefault, bool lpage_disallowed)
2852 struct kvm_shadow_walk_iterator it;
2853 struct kvm_mmu_page *sp;
2855 gfn_t gfn = gpa >> PAGE_SHIFT;
2856 gfn_t base_gfn = gfn;
2858 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
2859 return RET_PF_RETRY;
2861 trace_kvm_mmu_spte_requested(gpa, level, pfn);
2862 for_each_shadow_entry(vcpu, gpa, it) {
2864 * We cannot overwrite existing page tables with an NX
2865 * large page, as the leaf could be executable.
2867 disallowed_hugepage_adjust(it, gfn, &pfn, &level);
2869 base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
2870 if (it.level == level)
2873 drop_large_spte(vcpu, it.sptep);
2874 if (!is_shadow_present_pte(*it.sptep)) {
2875 sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr,
2876 it.level - 1, true, ACC_ALL);
2878 link_shadow_page(vcpu, it.sptep, sp);
2879 if (lpage_disallowed)
2880 account_huge_nx_page(vcpu->kvm, sp);
2884 ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL,
2885 write, level, base_gfn, pfn, prefault,
2887 direct_pte_prefetch(vcpu, it.sptep);
2888 ++vcpu->stat.pf_fixed;
2892 static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
2896 info.si_signo = SIGBUS;
2898 info.si_code = BUS_MCEERR_AR;
2899 info.si_addr = (void __user *)address;
2900 info.si_addr_lsb = PAGE_SHIFT;
2902 send_sig_info(SIGBUS, &info, tsk);
2905 static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
2908 * Do not cache the mmio info caused by writing the readonly gfn
2909 * into the spte; otherwise a read access on the readonly gfn would
2910 * also cause an mmio page fault and be treated as mmio access.
2912 if (pfn == KVM_PFN_ERR_RO_FAULT)
2913 return RET_PF_EMULATE;
2915 if (pfn == KVM_PFN_ERR_HWPOISON) {
2916 kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
2917 return RET_PF_RETRY;
2923 static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
2924 gfn_t gfn, kvm_pfn_t *pfnp,
2927 kvm_pfn_t pfn = *pfnp;
2928 int level = *levelp;
2931 * Check if it's a transparent hugepage. If this were a
2932 * hugetlbfs page, level wouldn't have been set to
2933 * PT_PAGE_TABLE_LEVEL and no adjustment would be done here.
2936 if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
2937 !kvm_is_zone_device_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL &&
2938 PageTransCompoundMap(pfn_to_page(pfn)) &&
2939 !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
2942 * mmu_notifier_retry was successful and we hold the
2943 * mmu_lock here, so the pmd can't be split out
2944 * from under us, and in turn
2945 * __split_huge_page_refcount() can't run from under
2946 * us, so we can safely transfer the refcount from
2947 * PG_tail to PG_head as we switch the pfn from tail to head.
2950 *levelp = level = PT_DIRECTORY_LEVEL;
2951 mask = KVM_PAGES_PER_HPAGE(level) - 1;
2952 VM_BUG_ON((gfn & mask) != (pfn & mask));
2954 kvm_release_pfn_clean(pfn);
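		/*
		 * The VM_BUG_ON above checks that gfn and pfn are congruent
		 * modulo the huge page size, which is what makes it safe to
		 * map the whole 2MB region with a single spte once level is
		 * raised to PT_DIRECTORY_LEVEL.
		 */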
2962 static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
2963 kvm_pfn_t pfn, unsigned access, int *ret_val)
2965 /* The pfn is invalid, report the error! */
2966 if (unlikely(is_error_pfn(pfn))) {
2967 *ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
2971 if (unlikely(is_noslot_pfn(pfn)))
2972 vcpu_cache_mmio_info(vcpu, gva, gfn, access);
2977 static bool page_fault_can_be_fast(u32 error_code)
2980 * Do not fix an mmio spte with an invalid generation number, which
2981 * needs to be updated by the slow page fault path.
2983 if (unlikely(error_code & PFERR_RSVD_MASK))
2987 * #PF can be fast only if the shadow page table is present and it
2988 * is caused by write-protect; that means we just need to change the
2989 * W bit of the spte, which can be done out of mmu-lock.
2991 if (!(error_code & PFERR_PRESENT_MASK) ||
2992 !(error_code & PFERR_WRITE_MASK))
2999 fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
3000 u64 *sptep, u64 spte)
3004 WARN_ON(!sp->role.direct);
3007 * The gfn of a direct spte is stable since it is calculated from sp->gfn and the spte's index.
3010 gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
3013 * Theoretically we could also set the dirty bit (and flush the TLB) here
3014 * in order to eliminate unnecessary PML logging. See the comments in
3015 * set_spte. But fast_page_fault is very unlikely to happen with PML
3016 * enabled, so we do not do this. This might result in the same GPA
3017 * being logged in the PML buffer again when the write really happens, and
3018 * eventually in mark_page_dirty being called twice for it. But that does no
3019 * harm. This also avoids the TLB flush needed after setting the dirty bit,
3020 * so non-PML cases won't be impacted.
3022 * Compare with set_spte where instead shadow_dirty_mask is set.
3024 if (cmpxchg64(sptep, spte, spte | PT_WRITABLE_MASK) == spte)
3025 kvm_vcpu_mark_page_dirty(vcpu, gfn);
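	/*
	 * cmpxchg64 above installs the W bit only if the spte still has the
	 * value read during the lockless walk; if another thread changed or
	 * zapped it in the meantime, the cmpxchg fails and the fault falls
	 * back to the regular page fault path.
	 */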
3032 * - true: let the vcpu access the same address again.
3033 * - false: let the real page fault path fix it.
3035 static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
3038 struct kvm_shadow_walk_iterator iterator;
3039 struct kvm_mmu_page *sp;
3043 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
3046 if (!page_fault_can_be_fast(error_code))
3049 walk_shadow_page_lockless_begin(vcpu);
3050 for_each_shadow_entry_lockless(vcpu, gva, iterator, spte)
3051 if (!is_shadow_present_pte(spte) || iterator.level < level)
3055 * If the mapping has been changed, let the vcpu fault on the
3056 * same address again.
3058 if (!is_shadow_present_pte(spte)) {
3063 sp = page_header(__pa(iterator.sptep));
3064 if (!is_last_spte(spte, sp->role.level))
3068 * Check if it is a spurious fault caused by a lazily flushed TLB.
3070 * Need not check the access of upper level table entries since
3071 * they are always ACC_ALL.
3073 if (is_writable_pte(spte)) {
3079 * Currently, to simplify the code, only sptes write-protected
3080 * by dirty logging can be fixed by the fast path.
3082 if (!spte_is_locklessly_modifiable(spte))
3086 * Do not fix write-permission on a large spte since we only mark
3087 * the first page dirty in the dirty-bitmap in fast_pf_fix_direct_spte();
3088 * that means other pages would be missed if its slot is dirty-logged.
3090 * Instead, we let the slow page fault path create a normal spte to fix the access.
3093 * See the comments in kvm_arch_commit_memory_region().
3095 if (sp->role.level > PT_PAGE_TABLE_LEVEL)
3099 * Currently, fast page fault only works for direct mappings since
3100 * the gfn is not stable for indirect shadow pages.
3101 * See Documentation/virtual/kvm/locking.txt for more details.
3103 ret = fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte);
3105 trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep,
3107 walk_shadow_page_lockless_end(vcpu);
3112 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
3113 gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable);
3114 static void make_mmu_pages_available(struct kvm_vcpu *vcpu);
3116 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
3117 gfn_t gfn, bool prefault)
3121 bool force_pt_level;
3123 unsigned long mmu_seq;
3124 bool map_writable, write = error_code & PFERR_WRITE_MASK;
3125 bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) &&
3126 is_nx_huge_page_enabled();
3128 force_pt_level = lpage_disallowed;
3129 level = mapping_level(vcpu, gfn, &force_pt_level);
3130 if (likely(!force_pt_level)) {
3132 * This path builds a PAE pagetable - so we can map
3133 * 2MB pages at maximum. Therefore check if the level
3134 * is larger than that.
3136 if (level > PT_DIRECTORY_LEVEL)
3137 level = PT_DIRECTORY_LEVEL;
3139 gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
3142 if (fast_page_fault(vcpu, v, level, error_code))
3143 return RET_PF_RETRY;
3145 mmu_seq = vcpu->kvm->mmu_notifier_seq;
3148 if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
3149 return RET_PF_RETRY;
3151 if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r))
3155 spin_lock(&vcpu->kvm->mmu_lock);
3156 if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
3158 make_mmu_pages_available(vcpu);
3159 if (likely(!force_pt_level))
3160 transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
3161 r = __direct_map(vcpu, v, write, map_writable, level, pfn,
3164 spin_unlock(&vcpu->kvm->mmu_lock);
3165 kvm_release_pfn_clean(pfn);
3170 static void mmu_free_roots(struct kvm_vcpu *vcpu)
3173 struct kvm_mmu_page *sp;
3174 LIST_HEAD(invalid_list);
3176 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
3179 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL &&
3180 (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL ||
3181 vcpu->arch.mmu.direct_map)) {
3182 hpa_t root = vcpu->arch.mmu.root_hpa;
3184 spin_lock(&vcpu->kvm->mmu_lock);
3185 sp = page_header(root);
3187 if (!sp->root_count && sp->role.invalid) {
3188 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
3189 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
3191 spin_unlock(&vcpu->kvm->mmu_lock);
3192 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
3196 spin_lock(&vcpu->kvm->mmu_lock);
3197 for (i = 0; i < 4; ++i) {
3198 hpa_t root = vcpu->arch.mmu.pae_root[i];
3201 root &= PT64_BASE_ADDR_MASK;
3202 sp = page_header(root);
3204 if (!sp->root_count && sp->role.invalid)
3205 kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
3208 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
3210 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
3211 spin_unlock(&vcpu->kvm->mmu_lock);
3212 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
3215 static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
3219 if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
3220 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3227 static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
3229 struct kvm_mmu_page *sp;
3232 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
3233 spin_lock(&vcpu->kvm->mmu_lock);
3234 make_mmu_pages_available(vcpu);
3235 sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL, 1, ACC_ALL);
3237 spin_unlock(&vcpu->kvm->mmu_lock);
3238 vcpu->arch.mmu.root_hpa = __pa(sp->spt);
3239 } else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) {
3240 for (i = 0; i < 4; ++i) {
3241 hpa_t root = vcpu->arch.mmu.pae_root[i];
3243 MMU_WARN_ON(VALID_PAGE(root));
3244 spin_lock(&vcpu->kvm->mmu_lock);
3245 make_mmu_pages_available(vcpu);
3246 sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
3247 i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL);
3248 root = __pa(sp->spt);
3250 spin_unlock(&vcpu->kvm->mmu_lock);
3251 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
3253 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
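		/*
		 * With a PT32E shadow root, each of the four pae_root entries
		 * maps 1GB of the guest physical address space (hence the
		 * i << 30 above), and root_hpa points at the pae_root array
		 * itself.
		 */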
3260 static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3262 struct kvm_mmu_page *sp;
3267 root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT;
3269 if (mmu_check_root(vcpu, root_gfn))
3273 * Do we shadow a long mode page table? If so we need to
3274 * write-protect the guest's page table root.
3276 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
3277 hpa_t root = vcpu->arch.mmu.root_hpa;
3279 MMU_WARN_ON(VALID_PAGE(root));
3281 spin_lock(&vcpu->kvm->mmu_lock);
3282 make_mmu_pages_available(vcpu);
3283 sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL,
3285 root = __pa(sp->spt);
3287 spin_unlock(&vcpu->kvm->mmu_lock);
3288 vcpu->arch.mmu.root_hpa = root;
3293 * We shadow a 32 bit page table. This may be a legacy 2-level
3294 * or a PAE 3-level page table. In either case we need to be aware that
3295 * the shadow page table may be a PAE or a long mode page table.
3297 pm_mask = PT_PRESENT_MASK;
3298 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL)
3299 pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
3301 for (i = 0; i < 4; ++i) {
3302 hpa_t root = vcpu->arch.mmu.pae_root[i];
3304 MMU_WARN_ON(VALID_PAGE(root));
3305 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
3306 pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i);
3307 if (!(pdptr & PT_PRESENT_MASK)) {
3308 vcpu->arch.mmu.pae_root[i] = 0;
3311 root_gfn = pdptr >> PAGE_SHIFT;
3312 if (mmu_check_root(vcpu, root_gfn))
3315 spin_lock(&vcpu->kvm->mmu_lock);
3316 make_mmu_pages_available(vcpu);
3317 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL,
3319 root = __pa(sp->spt);
3321 spin_unlock(&vcpu->kvm->mmu_lock);
3323 vcpu->arch.mmu.pae_root[i] = root | pm_mask;
3325 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
3328 * If we shadow a 32 bit page table with a long mode page
3329 * table we enter this path.
3331 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
3332 if (vcpu->arch.mmu.lm_root == NULL) {
3334 * The additional page necessary for this is only
3335 * allocated on demand.
3340 lm_root = (void*)get_zeroed_page(GFP_KERNEL);
3341 if (lm_root == NULL)
3344 lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask;
3346 vcpu->arch.mmu.lm_root = lm_root;
3349 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root);
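		/*
		 * When a 32-bit guest is shadowed with a long mode page table,
		 * the extra lm_root page acts as the PML4: its first entry
		 * points at pae_root, so the hardware walks four levels even
		 * though the guest only provides two or three.
		 */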
3355 static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
3357 if (vcpu->arch.mmu.direct_map)
3358 return mmu_alloc_direct_roots(vcpu);
3360 return mmu_alloc_shadow_roots(vcpu);
3363 static void mmu_sync_roots(struct kvm_vcpu *vcpu)
3366 struct kvm_mmu_page *sp;
3368 if (vcpu->arch.mmu.direct_map)
3371 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
3374 vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
3375 kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
3376 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
3377 hpa_t root = vcpu->arch.mmu.root_hpa;
3378 sp = page_header(root);
3379 mmu_sync_children(vcpu, sp);
3380 kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3383 for (i = 0; i < 4; ++i) {
3384 hpa_t root = vcpu->arch.mmu.pae_root[i];
3386 if (root && VALID_PAGE(root)) {
3387 root &= PT64_BASE_ADDR_MASK;
3388 sp = page_header(root);
3389 mmu_sync_children(vcpu, sp);
3392 kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3395 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
3397 spin_lock(&vcpu->kvm->mmu_lock);
3398 mmu_sync_roots(vcpu);
3399 spin_unlock(&vcpu->kvm->mmu_lock);
3401 EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots);
3403 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
3404 u32 access, struct x86_exception *exception)
3407 exception->error_code = 0;
3411 static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
3413 struct x86_exception *exception)
3416 exception->error_code = 0;
3417 return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
3421 __is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level)
3423 int bit7 = (pte >> 7) & 1, low6 = pte & 0x3f;
3425 return (pte & rsvd_check->rsvd_bits_mask[bit7][level-1]) |
3426 ((rsvd_check->bad_mt_xwr & (1ull << low6)) != 0);
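/*
 * rsvd_bits_mask is indexed first by bit 7 of the entry (set for large
 * pages) and then by level - 1; bad_mt_xwr flags illegal combinations of
 * the low 6 bits, which for EPT encode the R/W/X permissions (bits 2:0)
 * and the memory type (bits 5:3).
 */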
3429 static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
3431 return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level);
3434 static bool is_shadow_zero_bits_set(struct kvm_mmu *mmu, u64 spte, int level)
3436 return __is_rsvd_bits_set(&mmu->shadow_zero_check, spte, level);
3439 static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3442 return vcpu_match_mmio_gpa(vcpu, addr);
3444 return vcpu_match_mmio_gva(vcpu, addr);
3447 /* return true if reserved bit is detected on spte. */
3449 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
3451 struct kvm_shadow_walk_iterator iterator;
3452 u64 sptes[PT64_ROOT_LEVEL], spte = 0ull;
3454 bool reserved = false;
3456 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
3459 walk_shadow_page_lockless_begin(vcpu);
3461 for (shadow_walk_init(&iterator, vcpu, addr),
3462 leaf = root = iterator.level;
3463 shadow_walk_okay(&iterator);
3464 __shadow_walk_next(&iterator, spte)) {
3465 spte = mmu_spte_get_lockless(iterator.sptep);
3467 sptes[leaf - 1] = spte;
3470 if (!is_shadow_present_pte(spte))
3473 reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte,
3477 walk_shadow_page_lockless_end(vcpu);
3480 pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n",
3482 while (root > leaf) {
3483 pr_err("------ spte 0x%llx level %d.\n",
3484 sptes[root - 1], root);
3493 static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3498 if (mmio_info_in_cache(vcpu, addr, direct))
3499 return RET_PF_EMULATE;
3501 reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte);
3502 if (WARN_ON(reserved))
3505 if (is_mmio_spte(spte)) {
3506 gfn_t gfn = get_mmio_spte_gfn(spte);
3507 unsigned access = get_mmio_spte_access(spte);
3509 if (!check_mmio_spte(vcpu, spte))
3510 return RET_PF_INVALID;
3515 trace_handle_mmio_page_fault(addr, gfn, access);
3516 vcpu_cache_mmio_info(vcpu, addr, gfn, access);
3517 return RET_PF_EMULATE;
3521 * If the page table is zapped by other cpus, let the CPU fault again on the address.
3524 return RET_PF_RETRY;
3526 EXPORT_SYMBOL_GPL(handle_mmio_page_fault);
3528 static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
3529 u32 error_code, gfn_t gfn)
3531 if (unlikely(error_code & PFERR_RSVD_MASK))
3534 if (!(error_code & PFERR_PRESENT_MASK) ||
3535 !(error_code & PFERR_WRITE_MASK))
3539 * The guest is writing a page which is write-tracked, which
3540 * cannot be fixed by the page fault handler.
3542 if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
3548 static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
3550 struct kvm_shadow_walk_iterator iterator;
3553 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
3556 walk_shadow_page_lockless_begin(vcpu);
3557 for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
3558 clear_sp_write_flooding_count(iterator.sptep);
3559 if (!is_shadow_present_pte(spte))
3562 walk_shadow_page_lockless_end(vcpu);
3565 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
3566 u32 error_code, bool prefault)
3568 gfn_t gfn = gva >> PAGE_SHIFT;
3571 pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
3573 if (page_fault_handle_page_track(vcpu, error_code, gfn))
3574 return RET_PF_EMULATE;
3576 r = mmu_topup_memory_caches(vcpu);
3580 MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
3583 return nonpaging_map(vcpu, gva & PAGE_MASK,
3584 error_code, gfn, prefault);
3587 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
3589 struct kvm_arch_async_pf arch;
3591 arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
3593 arch.direct_map = vcpu->arch.mmu.direct_map;
3594 arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);
3596 return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
3599 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
3601 if (unlikely(!lapic_in_kernel(vcpu) ||
3602 kvm_event_needs_reinjection(vcpu)))
3605 if (is_guest_mode(vcpu))
3608 return kvm_x86_ops->interrupt_allowed(vcpu);
3611 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
3612 gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable)
3614 struct kvm_memory_slot *slot;
3617 slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3619 *pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable);
3621 return false; /* *pfn has correct page already */
3623 if (!prefault && kvm_can_do_async_pf(vcpu)) {
3624 trace_kvm_try_async_get_page(gva, gfn);
3625 if (kvm_find_async_pf_gfn(vcpu, gfn)) {
3626 trace_kvm_async_pf_doublefault(gva, gfn);
3627 kvm_make_request(KVM_REQ_APF_HALT, vcpu);
3629 } else if (kvm_arch_setup_async_pf(vcpu, gva, gfn))
3633 *pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable);
3638 check_hugepage_cache_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int level)
3640 int page_num = KVM_PAGES_PER_HPAGE(level);
3642 gfn &= ~(page_num - 1);
3644 return kvm_mtrr_check_gfn_range_consistency(vcpu, gfn, page_num);
3647 static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
3653 bool force_pt_level;
3654 gfn_t gfn = gpa >> PAGE_SHIFT;
3655 unsigned long mmu_seq;
3656 int write = error_code & PFERR_WRITE_MASK;
3658 bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) &&
3659 is_nx_huge_page_enabled();
3661 MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
3663 if (page_fault_handle_page_track(vcpu, error_code, gfn))
3664 return RET_PF_EMULATE;
3666 r = mmu_topup_memory_caches(vcpu);
3672 !check_hugepage_cache_consistency(vcpu, gfn, PT_DIRECTORY_LEVEL);
3673 level = mapping_level(vcpu, gfn, &force_pt_level);
3674 if (likely(!force_pt_level)) {
3675 if (level > PT_DIRECTORY_LEVEL &&
3676 !check_hugepage_cache_consistency(vcpu, gfn, level))
3677 level = PT_DIRECTORY_LEVEL;
3678 gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
3681 if (fast_page_fault(vcpu, gpa, level, error_code))
3682 return RET_PF_RETRY;
3684 mmu_seq = vcpu->kvm->mmu_notifier_seq;
3687 if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
3688 return RET_PF_RETRY;
3690 if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
3694 spin_lock(&vcpu->kvm->mmu_lock);
3695 if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
3697 make_mmu_pages_available(vcpu);
3698 if (likely(!force_pt_level))
3699 transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
3700 r = __direct_map(vcpu, gpa, write, map_writable, level, pfn,
3701 prefault, lpage_disallowed);
3703 spin_unlock(&vcpu->kvm->mmu_lock);
3704 kvm_release_pfn_clean(pfn);
3708 static void nonpaging_init_context(struct kvm_vcpu *vcpu,
3709 struct kvm_mmu *context)
3711 context->page_fault = nonpaging_page_fault;
3712 context->gva_to_gpa = nonpaging_gva_to_gpa;
3713 context->sync_page = nonpaging_sync_page;
3714 context->invlpg = nonpaging_invlpg;
3715 context->update_pte = nonpaging_update_pte;
3716 context->root_level = 0;
3717 context->shadow_root_level = PT32E_ROOT_LEVEL;
3718 context->root_hpa = INVALID_PAGE;
3719 context->direct_map = true;
3720 context->nx = false;
3723 void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu)
3725 mmu_free_roots(vcpu);
3728 static unsigned long get_cr3(struct kvm_vcpu *vcpu)
3730 return kvm_read_cr3(vcpu);
3733 static void inject_page_fault(struct kvm_vcpu *vcpu,
3734 struct x86_exception *fault)
3736 vcpu->arch.mmu.inject_page_fault(vcpu, fault);
3739 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
3740 unsigned access, int *nr_present)
3742 if (unlikely(is_mmio_spte(*sptep))) {
3743 if (gfn != get_mmio_spte_gfn(*sptep)) {
3744 mmu_spte_clear_no_track(sptep);
3749 mark_mmio_spte(vcpu, sptep, gfn, access);
3756 static inline bool is_last_gpte(struct kvm_mmu *mmu,
3757 unsigned level, unsigned gpte)
3760 * The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
3761 * If it is clear, there are no large pages at this level, so clear
3762 * PT_PAGE_SIZE_MASK in gpte if that is the case.
3764 gpte &= level - mmu->last_nonleaf_level;
3767 * PT_PAGE_TABLE_LEVEL always terminates. The RHS has bit 7 set
3768 * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
3769 * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
3771 gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
3773 return gpte & PT_PAGE_SIZE_MASK;
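/*
 * Worked example: for level == PT_PAGE_TABLE_LEVEL the second subtraction
 * is -1, so bit 7 is forced on and the entry is always reported as a leaf;
 * for any level >= last_nonleaf_level the first subtraction clears bit 7,
 * so the entry is never treated as a large page.
 */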
3776 #define PTTYPE_EPT 18 /* arbitrary */
3777 #define PTTYPE PTTYPE_EPT
3778 #include "paging_tmpl.h"
3782 #include "paging_tmpl.h"
3786 #include "paging_tmpl.h"
3790 __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
3791 struct rsvd_bits_validate *rsvd_check,
3792 int maxphyaddr, int level, bool nx, bool gbpages,
3795 u64 exb_bit_rsvd = 0;
3796 u64 gbpages_bit_rsvd = 0;
3797 u64 nonleaf_bit8_rsvd = 0;
3799 rsvd_check->bad_mt_xwr = 0;
3802 exb_bit_rsvd = rsvd_bits(63, 63);
3804 gbpages_bit_rsvd = rsvd_bits(7, 7);
3807 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
3808 * leaf entries) on AMD CPUs only.
3811 nonleaf_bit8_rsvd = rsvd_bits(8, 8);
3814 case PT32_ROOT_LEVEL:
3815 /* no rsvd bits for 2 level 4K page table entries */
3816 rsvd_check->rsvd_bits_mask[0][1] = 0;
3817 rsvd_check->rsvd_bits_mask[0][0] = 0;
3818 rsvd_check->rsvd_bits_mask[1][0] =
3819 rsvd_check->rsvd_bits_mask[0][0];
3822 rsvd_check->rsvd_bits_mask[1][1] = 0;
3826 if (is_cpuid_PSE36())
3827 /* 36-bit PSE 4MB page */
3828 rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
3830 /* 32-bit PSE 4MB page */
3831 rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
3833 case PT32E_ROOT_LEVEL:
3834 rsvd_check->rsvd_bits_mask[0][2] =
3835 rsvd_bits(maxphyaddr, 63) |
3836 rsvd_bits(5, 8) | rsvd_bits(1, 2); /* PDPTE */
3837 rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
3838 rsvd_bits(maxphyaddr, 62); /* PDE */
3839 rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd |
3840 rsvd_bits(maxphyaddr, 62); /* PTE */
3841 rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd |
3842 rsvd_bits(maxphyaddr, 62) |
3843 rsvd_bits(13, 20); /* large page */
3844 rsvd_check->rsvd_bits_mask[1][0] =
3845 rsvd_check->rsvd_bits_mask[0][0];
3847 case PT64_ROOT_LEVEL:
3848 rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
3849 nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
3850 rsvd_bits(maxphyaddr, 51);
3851 rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd |
3853 rsvd_bits(maxphyaddr, 51);
3854 rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
3855 rsvd_bits(maxphyaddr, 51);
3856 rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd |
3857 rsvd_bits(maxphyaddr, 51);
3858 rsvd_check->rsvd_bits_mask[1][3] =
3859 rsvd_check->rsvd_bits_mask[0][3];
3860 rsvd_check->rsvd_bits_mask[1][2] = exb_bit_rsvd |
3861 gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51) |
3863 rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd |
3864 rsvd_bits(maxphyaddr, 51) |
3865 rsvd_bits(13, 20); /* large page */
3866 rsvd_check->rsvd_bits_mask[1][0] =
3867 rsvd_check->rsvd_bits_mask[0][0];
3872 static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
3873 struct kvm_mmu *context)
3875 __reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
3876 cpuid_maxphyaddr(vcpu), context->root_level,
3877 context->nx, guest_cpuid_has_gbpages(vcpu),
3878 is_pse(vcpu), guest_cpuid_is_amd(vcpu));
3882 __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
3883 int maxphyaddr, bool execonly)
3887 rsvd_check->rsvd_bits_mask[0][3] =
3888 rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7);
3889 rsvd_check->rsvd_bits_mask[0][2] =
3890 rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6);
3891 rsvd_check->rsvd_bits_mask[0][1] =
3892 rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6);
3893 rsvd_check->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51);
3896 rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
3897 rsvd_check->rsvd_bits_mask[1][2] =
3898 rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 29);
3899 rsvd_check->rsvd_bits_mask[1][1] =
3900 rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 20);
3901 rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
3903 bad_mt_xwr = 0xFFull << (2 * 8); /* bits 3..5 must not be 2 */
3904 bad_mt_xwr |= 0xFFull << (3 * 8); /* bits 3..5 must not be 3 */
3905 bad_mt_xwr |= 0xFFull << (7 * 8); /* bits 3..5 must not be 7 */
3906 bad_mt_xwr |= REPEAT_BYTE(1ull << 2); /* bits 0..2 must not be 010 */
3907 bad_mt_xwr |= REPEAT_BYTE(1ull << 6); /* bits 0..2 must not be 110 */
3909 /* bits 0..2 must not be 100 unless VMX capabilities allow it */
3910 bad_mt_xwr |= REPEAT_BYTE(1ull << 4);
3912 rsvd_check->bad_mt_xwr = bad_mt_xwr;
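	/*
	 * bad_mt_xwr is a 64-bit bitmap indexed by the low 6 bits of an EPT
	 * entry (R/W/X in bits 2:0, memory type in bits 5:3); a set bit means
	 * that combination is reserved, and __is_rsvd_bits_set() tests it
	 * with a single shift.
	 */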
3915 static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
3916 struct kvm_mmu *context, bool execonly)
3918 __reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
3919 cpuid_maxphyaddr(vcpu), execonly);
3923 * The page table on the host is the shadow page table for the page
3924 * table in the guest or an AMD nested guest; its mmu features completely
3925 * follow the features in the guest.
3928 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
3931 * KVM uses NX when TDP is disabled to handle a variety of scenarios,
3932 * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
3933 * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
3934 * The iTLB multi-hit workaround can be toggled at any time, so assume
3935 * NX can be used by any non-nested shadow MMU to avoid having to reset
3936 * MMU contexts. Note, KVM forces EFER.NX=1 when TDP is disabled.
3938 bool uses_nx = context->nx || !tdp_enabled ||
3939 context->base_role.smep_andnot_wp;
3942 * Passing "true" to the last argument is okay; it adds a check
3943 * on bit 8 of the SPTEs which KVM doesn't use anyway.
3945 __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
3946 boot_cpu_data.x86_phys_bits,
3947 context->shadow_root_level, uses_nx,
3948 guest_cpuid_has_gbpages(vcpu), is_pse(vcpu),
3951 EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
3953 static inline bool boot_cpu_is_amd(void)
3955 WARN_ON_ONCE(!tdp_enabled);
3956 return shadow_x_mask == 0;
3960 * For the direct page table on the host, use as many mmu features as
3961 * possible; however, kvm currently does not do execution-protection.
3964 reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
3965 struct kvm_mmu *context)
3967 if (boot_cpu_is_amd())
3968 __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
3969 boot_cpu_data.x86_phys_bits,
3970 context->shadow_root_level, false,
3971 boot_cpu_has(X86_FEATURE_GBPAGES),
3974 __reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
3975 boot_cpu_data.x86_phys_bits,
3981 * Same as the comments in reset_shadow_zero_bits_mask(), except this
3982 * is the shadow page table for an Intel nested guest.
3985 reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
3986 struct kvm_mmu *context, bool execonly)
3988 __reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
3989 boot_cpu_data.x86_phys_bits, execonly);
3992 static void update_permission_bitmask(struct kvm_vcpu *vcpu,
3993 struct kvm_mmu *mmu, bool ept)
3995 unsigned bit, byte, pfec;
3997 bool fault, x, w, u, wf, uf, ff, smapf, cr4_smap, cr4_smep, smap = 0;
3999 cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
4000 cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
4001 for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
4004 wf = pfec & PFERR_WRITE_MASK;
4005 uf = pfec & PFERR_USER_MASK;
4006 ff = pfec & PFERR_FETCH_MASK;
4008 * PFERR_RSVD_MASK bit is set in PFEC if the access is not
4009 * subject to SMAP restrictions, and cleared otherwise. The
4010 * bit is only meaningful if the SMAP bit is set in CR4.
4012 smapf = !(pfec & PFERR_RSVD_MASK);
4013 for (bit = 0; bit < 8; ++bit) {
4014 x = bit & ACC_EXEC_MASK;
4015 w = bit & ACC_WRITE_MASK;
4016 u = bit & ACC_USER_MASK;
4019 /* Not really needed: !nx will cause pte.nx to fault */
4021 /* Allow supervisor writes if !cr0.wp */
4022 w |= !is_write_protection(vcpu) && !uf;
4023 /* Disallow supervisor fetches of user code if cr4.smep */
4024 x &= !(cr4_smep && u && !uf);
4027 * SMAP: kernel-mode data accesses from user-mode
4028 * mappings should fault. A fault is considered
4029 * a SMAP violation if all of the following
4030 * conditions are true:
4031 * - X86_CR4_SMAP is set in CR4
4032 * - A user page is accessed
4033 * - Page fault in kernel mode
4034 * - if CPL = 3 or X86_EFLAGS_AC is clear
4036 * Here, we cover the first three conditions.
4037 * The fourth is computed dynamically in
4038 * permission_fault() and is in smapf.
4040 * Also, SMAP does not affect instruction
4041 * fetches, so add the !ff check here to make it clearer.
4044 smap = cr4_smap && u && !uf && !ff;
4047 fault = (ff && !x) || (uf && !u) || (wf && !w) ||
4049 map |= fault << bit;
4051 mmu->permissions[byte] = map;
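		/*
		 * mmu->permissions is thus a lookup table: the byte index
		 * corresponds to a combination of page fault error code bits
		 * (write/user/fetch/...), the bit index to the pte's effective
		 * x/w/u access bits, and a set bit means that combination
		 * must fault. permission_fault() consults it when checking
		 * guest accesses.
		 */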
4056 * PKU is an additional mechanism by which the paging controls access to
4057 * user-mode addresses based on the value in the PKRU register. Protection
4058 * key violations are reported through a bit in the page fault error code.
4059 * Unlike other bits of the error code, the PK bit is not known at the
4060 * call site of e.g. gva_to_gpa; it must be computed directly in
4061 * permission_fault based on two bits of PKRU, on some machine state (CR4,
4062 * CR0, EFER, CPL), and on other bits of the error code and the page tables.
4064 * In particular the following conditions come from the error code, the
4065 * page tables and the machine state:
4066 * - PK is always zero unless CR4.PKE=1 and EFER.LMA=1
4067 * - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch)
4068 * - PK is always zero if U=0 in the page tables
4069 * - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
4071 * The PKRU bitmask caches the result of these four conditions. The error
4072 * code (minus the P bit) and the page table's U bit form an index into the
4073 * PKRU bitmask. Two bits of the PKRU bitmask are then extracted and ANDed
4074 * with the two bits of the PKRU register corresponding to the protection key.
4075 * For the first three conditions above the bits will be 00, thus masking
4076 * away both AD and WD. For all reads or if the last condition holds, WD
4077 * only will be masked away.
4079 static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
4090 /* PKEY is enabled only if CR4.PKE and EFER.LMA are both set. */
4091 if (!kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || !is_long_mode(vcpu)) {
4096 wp = is_write_protection(vcpu);
4098 for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
4099 unsigned pfec, pkey_bits;
4100 bool check_pkey, check_write, ff, uf, wf, pte_user;
4103 ff = pfec & PFERR_FETCH_MASK;
4104 uf = pfec & PFERR_USER_MASK;
4105 wf = pfec & PFERR_WRITE_MASK;
4107 /* PFEC.RSVD is replaced by ACC_USER_MASK. */
4108 pte_user = pfec & PFERR_RSVD_MASK;
4111 * Only accesses that are not instruction fetches and
4112 * that touch a user page need to be checked.
4114 check_pkey = (!ff && pte_user);
4116 * write access is controlled by PKRU if it is a
4117 * user access or CR0.WP = 1.
4119 check_write = check_pkey && wf && (uf || wp);
4121 /* PKRU.AD stops both read and write access. */
4122 pkey_bits = !!check_pkey;
4123 /* PKRU.WD stops write access. */
4124 pkey_bits |= (!!check_write) << 1;
4126 mmu->pkru_mask |= (pkey_bits & 3) << pfec;
4130 static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
4132 unsigned root_level = mmu->root_level;
4134 mmu->last_nonleaf_level = root_level;
4135 if (root_level == PT32_ROOT_LEVEL && is_pse(vcpu))
4136 mmu->last_nonleaf_level++;
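	/*
	 * With 32-bit non-PAE paging, PDEs can map 4MB pages only when
	 * CR4.PSE is set, so last_nonleaf_level is bumped past the PDE level
	 * in that case; is_last_gpte() uses this to decide whether bit 7 of
	 * a gpte may be honoured as a large-page bit.
	 */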
4139 static void paging64_init_context_common(struct kvm_vcpu *vcpu,
4140 struct kvm_mmu *context,
4143 context->nx = is_nx(vcpu);
4144 context->root_level = level;
4146 reset_rsvds_bits_mask(vcpu, context);
4147 update_permission_bitmask(vcpu, context, false);
4148 update_pkru_bitmask(vcpu, context, false);
4149 update_last_nonleaf_level(vcpu, context);
4151 MMU_WARN_ON(!is_pae(vcpu));
4152 context->page_fault = paging64_page_fault;
4153 context->gva_to_gpa = paging64_gva_to_gpa;
4154 context->sync_page = paging64_sync_page;
4155 context->invlpg = paging64_invlpg;
4156 context->update_pte = paging64_update_pte;
4157 context->shadow_root_level = level;
4158 context->root_hpa = INVALID_PAGE;
4159 context->direct_map = false;
4162 static void paging64_init_context(struct kvm_vcpu *vcpu,
4163 struct kvm_mmu *context)
4165 paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL);
4168 static void paging32_init_context(struct kvm_vcpu *vcpu,
4169 struct kvm_mmu *context)
4171 context->nx = false;
4172 context->root_level = PT32_ROOT_LEVEL;
4174 reset_rsvds_bits_mask(vcpu, context);
4175 update_permission_bitmask(vcpu, context, false);
4176 update_pkru_bitmask(vcpu, context, false);
4177 update_last_nonleaf_level(vcpu, context);
4179 context->page_fault = paging32_page_fault;
4180 context->gva_to_gpa = paging32_gva_to_gpa;
4181 context->sync_page = paging32_sync_page;
4182 context->invlpg = paging32_invlpg;
4183 context->update_pte = paging32_update_pte;
4184 context->shadow_root_level = PT32E_ROOT_LEVEL;
4185 context->root_hpa = INVALID_PAGE;
4186 context->direct_map = false;
4189 static void paging32E_init_context(struct kvm_vcpu *vcpu,
4190 struct kvm_mmu *context)
4192 paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
4195 static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
4197 struct kvm_mmu *context = &vcpu->arch.mmu;
4199 context->base_role.word = 0;
4200 context->base_role.smm = is_smm(vcpu);
4201 context->page_fault = tdp_page_fault;
4202 context->sync_page = nonpaging_sync_page;
4203 context->invlpg = nonpaging_invlpg;
4204 context->update_pte = nonpaging_update_pte;
4205 context->shadow_root_level = kvm_x86_ops->get_tdp_level();
4206 context->root_hpa = INVALID_PAGE;
4207 context->direct_map = true;
4208 context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
4209 context->get_cr3 = get_cr3;
4210 context->get_pdptr = kvm_pdptr_read;
4211 context->inject_page_fault = kvm_inject_page_fault;
4213 if (!is_paging(vcpu)) {
4214 context->nx = false;
4215 context->gva_to_gpa = nonpaging_gva_to_gpa;
4216 context->root_level = 0;
4217 } else if (is_long_mode(vcpu)) {
4218 context->nx = is_nx(vcpu);
4219 context->root_level = PT64_ROOT_LEVEL;
4220 reset_rsvds_bits_mask(vcpu, context);
4221 context->gva_to_gpa = paging64_gva_to_gpa;
4222 } else if (is_pae(vcpu)) {
4223 context->nx = is_nx(vcpu);
4224 context->root_level = PT32E_ROOT_LEVEL;
4225 reset_rsvds_bits_mask(vcpu, context);
4226 context->gva_to_gpa = paging64_gva_to_gpa;
4228 context->nx = false;
4229 context->root_level = PT32_ROOT_LEVEL;
4230 reset_rsvds_bits_mask(vcpu, context);
4231 context->gva_to_gpa = paging32_gva_to_gpa;
4234 update_permission_bitmask(vcpu, context, false);
4235 update_pkru_bitmask(vcpu, context, false);
4236 update_last_nonleaf_level(vcpu, context);
4237 reset_tdp_shadow_zero_bits_mask(vcpu, context);
4240 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
4242 bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
4243 bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
4244 struct kvm_mmu *context = &vcpu->arch.mmu;
4246 MMU_WARN_ON(VALID_PAGE(context->root_hpa));
4248 if (!is_paging(vcpu))
4249 nonpaging_init_context(vcpu, context);
4250 else if (is_long_mode(vcpu))
4251 paging64_init_context(vcpu, context);
4252 else if (is_pae(vcpu))
4253 paging32E_init_context(vcpu, context);
4255 paging32_init_context(vcpu, context);
4257 context->base_role.nxe = is_nx(vcpu);
4258 context->base_role.cr4_pae = !!is_pae(vcpu);
4259 context->base_role.cr0_wp = is_write_protection(vcpu);
4260 context->base_role.smep_andnot_wp
4261 = smep && !is_write_protection(vcpu);
4262 context->base_role.smap_andnot_wp
4263 = smap && !is_write_protection(vcpu);
4264 context->base_role.smm = is_smm(vcpu);
4265 reset_shadow_zero_bits_mask(vcpu, context);
4267 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
4269 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly)
4271 struct kvm_mmu *context = &vcpu->arch.mmu;
4273 MMU_WARN_ON(VALID_PAGE(context->root_hpa));
4275 context->shadow_root_level = kvm_x86_ops->get_tdp_level();
4278 context->page_fault = ept_page_fault;
4279 context->gva_to_gpa = ept_gva_to_gpa;
4280 context->sync_page = ept_sync_page;
4281 context->invlpg = ept_invlpg;
4282 context->update_pte = ept_update_pte;
4283 context->root_level = context->shadow_root_level;
4284 context->root_hpa = INVALID_PAGE;
4285 context->direct_map = false;
4287 update_permission_bitmask(vcpu, context, true);
4288 update_pkru_bitmask(vcpu, context, true);
4289 update_last_nonleaf_level(vcpu, context);
4290 reset_rsvds_bits_mask_ept(vcpu, context, execonly);
4291 reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
4293 EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
4295 static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
4297 struct kvm_mmu *context = &vcpu->arch.mmu;
4299 kvm_init_shadow_mmu(vcpu);
4300 context->set_cr3 = kvm_x86_ops->set_cr3;
4301 context->get_cr3 = get_cr3;
4302 context->get_pdptr = kvm_pdptr_read;
4303 context->inject_page_fault = kvm_inject_page_fault;
4306 static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
4308 struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
4310 g_context->get_cr3 = get_cr3;
4311 g_context->get_pdptr = kvm_pdptr_read;
4312 g_context->inject_page_fault = kvm_inject_page_fault;
4315 * Note that arch.mmu.gva_to_gpa translates l2_gpa to l1_gpa using
4316 * L1's nested page tables (e.g. EPT12). The nested translation
4317 * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
4318 * L2's page tables as the first level of translation and L1's
4319 * nested page tables as the second level of translation. Basically
4320 * the gva_to_gpa functions between mmu and nested_mmu are swapped.
4322 if (!is_paging(vcpu)) {
4323 g_context->nx = false;
4324 g_context->root_level = 0;
4325 g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
4326 } else if (is_long_mode(vcpu)) {
4327 g_context->nx = is_nx(vcpu);
4328 g_context->root_level = PT64_ROOT_LEVEL;
4329 reset_rsvds_bits_mask(vcpu, g_context);
4330 g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
4331 } else if (is_pae(vcpu)) {
4332 g_context->nx = is_nx(vcpu);
4333 g_context->root_level = PT32E_ROOT_LEVEL;
4334 reset_rsvds_bits_mask(vcpu, g_context);
4335 g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
4337 g_context->nx = false;
4338 g_context->root_level = PT32_ROOT_LEVEL;
4339 reset_rsvds_bits_mask(vcpu, g_context);
4340 g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
4343 update_permission_bitmask(vcpu, g_context, false);
4344 update_pkru_bitmask(vcpu, g_context, false);
4345 update_last_nonleaf_level(vcpu, g_context);
4348 static void init_kvm_mmu(struct kvm_vcpu *vcpu)
4350 if (mmu_is_nested(vcpu))
4351 init_kvm_nested_mmu(vcpu);
4352 else if (tdp_enabled)
4353 init_kvm_tdp_mmu(vcpu);
4355 init_kvm_softmmu(vcpu);
4358 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
4360 kvm_mmu_unload(vcpu);
4363 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
4365 int kvm_mmu_load(struct kvm_vcpu *vcpu)
4369 r = mmu_topup_memory_caches(vcpu);
4372 r = mmu_alloc_roots(vcpu);
4373 kvm_mmu_sync_roots(vcpu);
4376 /* set_cr3() should ensure TLB has been flushed */
4377 vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
4381 EXPORT_SYMBOL_GPL(kvm_mmu_load);
4383 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
4385 mmu_free_roots(vcpu);
4386 WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
4388 EXPORT_SYMBOL_GPL(kvm_mmu_unload);
4390 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
4391 struct kvm_mmu_page *sp, u64 *spte,
4394 if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
4395 ++vcpu->kvm->stat.mmu_pde_zapped;
4399 ++vcpu->kvm->stat.mmu_pte_updated;
4400 vcpu->arch.mmu.update_pte(vcpu, sp, spte, new);
4403 static bool need_remote_flush(u64 old, u64 new)
4405 if (!is_shadow_present_pte(old))
4407 if (!is_shadow_present_pte(new))
4409 if ((old ^ new) & PT64_BASE_ADDR_MASK)
4411 old ^= shadow_nx_mask;
4412 new ^= shadow_nx_mask;
4413 return (old & ~new & PT64_PERM_MASK) != 0;
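/*
 * A remote TLB flush is needed only if the old spte was present and the
 * update either drops the present bit, changes the physical address, or
 * reduces permissions (the NX bit is inverted first so that removing
 * execute permission also counts as a permission loss).
 */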
4416 static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
4423 * Assume that the pte write is on a page table of the same type
4424 * as the current vcpu's paging mode, since we update the sptes only
4425 * when they have the same mode.
4427 if (is_pae(vcpu) && *bytes == 4) {
4428 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
4433 if (*bytes == 4 || *bytes == 8) {
4434 r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
4443 * If we're seeing too many writes to a page, it may no longer be a page table,
4444 * or we may be forking, in which case it is better to unmap the page.
4446 static bool detect_write_flooding(struct kvm_mmu_page *sp)
4449 * Skip write-flooding detection for sps whose level is 1, because
4450 * they can become unsync and then the guest page is not write-protected.
4452 if (sp->role.level == PT_PAGE_TABLE_LEVEL)
4455 atomic_inc(&sp->write_flooding_count);
4456 return atomic_read(&sp->write_flooding_count) >= 3;
4460 * Misaligned accesses are too much trouble to fix up; also, they usually
4461 * indicate a page is not used as a page table.
4463 static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
4466 unsigned offset, pte_size, misaligned;
4468 pgprintk("misaligned: gpa %llx bytes %d role %x\n",
4469 gpa, bytes, sp->role.word);
4471 offset = offset_in_page(gpa);
4472 pte_size = sp->role.cr4_pae ? 8 : 4;
4475 * Sometimes, the OS only writes the last byte to update status
4476 * bits; for example, in Linux, the andb instruction is used in clear_bit().
4478 if (!(offset & (pte_size - 1)) && bytes == 1)
4481 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
4482 misaligned |= bytes < 4;
4487 static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
4489 unsigned page_offset, quadrant;
4493 page_offset = offset_in_page(gpa);
4494 level = sp->role.level;
4496 if (!sp->role.cr4_pae) {
4497 page_offset <<= 1; /* 32->64 */
4499 * A 32-bit pde maps 4MB while the shadow pdes map
4500 * only 2MB. So we need to double the offset again
4501 * and zap two pdes instead of one.
4503 if (level == PT32_ROOT_LEVEL) {
4504 page_offset &= ~7; /* kill rounding error */
4508 quadrant = page_offset >> PAGE_SHIFT;
4509 page_offset &= ~PAGE_MASK;
4510 if (quadrant != sp->role.quadrant)
4514 spte = &sp->spt[page_offset / sizeof(*spte)];
4518 static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
4519 const u8 *new, int bytes)
4521 gfn_t gfn = gpa >> PAGE_SHIFT;
4522 struct kvm_mmu_page *sp;
4523 LIST_HEAD(invalid_list);
4524 u64 entry, gentry, *spte;
4526 bool remote_flush, local_flush;
4527 union kvm_mmu_page_role mask = { };
4532 mask.smep_andnot_wp = 1;
4533 mask.smap_andnot_wp = 1;
4537 * If we don't have indirect shadow pages, it means no page is
4538 * write-protected, so we can exit simply.
4540 if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
4543 remote_flush = local_flush = false;
4545 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
4548 * No need to care whether the memory allocation is successful
4549 * or not, since pte prefetch is skipped if there are not
4550 * enough objects in the cache.
4552 mmu_topup_memory_caches(vcpu);
4554 spin_lock(&vcpu->kvm->mmu_lock);
4556 gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
4558 ++vcpu->kvm->stat.mmu_pte_write;
4559 kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
4561 for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
4562 if (detect_write_misaligned(sp, gpa, bytes) ||
4563 detect_write_flooding(sp)) {
4564 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
4565 ++vcpu->kvm->stat.mmu_flooded;
4569 spte = get_written_sptes(sp, gpa, &npte);
4576 mmu_page_zap_pte(vcpu->kvm, sp, spte);
4578 !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
4579 & mask.word) && rmap_can_add(vcpu))
4580 mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
4581 if (need_remote_flush(entry, *spte))
4582 remote_flush = true;
4586 kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush);
4587 kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
4588 spin_unlock(&vcpu->kvm->mmu_lock);
4591 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
4596 if (vcpu->arch.mmu.direct_map)
4599 gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
4601 r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
4605 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
4607 static void make_mmu_pages_available(struct kvm_vcpu *vcpu)
4609 LIST_HEAD(invalid_list);
4611 if (likely(kvm_mmu_available_pages(vcpu->kvm) >= KVM_MIN_FREE_MMU_PAGES))
4614 while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) {
4615 if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list))
4618 ++vcpu->kvm->stat.mmu_recycled;
4620 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
4623 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
4624 void *insn, int insn_len)
4626 int r, emulation_type = EMULTYPE_RETRY;
4627 enum emulation_result er;
4628 bool direct = vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu);
4631 if (unlikely(error_code & PFERR_RSVD_MASK)) {
4632 r = handle_mmio_page_fault(vcpu, cr2, direct);
4633 if (r == RET_PF_EMULATE) {
4639 if (r == RET_PF_INVALID) {
4640 r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
4641 WARN_ON(r == RET_PF_INVALID);
4644 if (r == RET_PF_RETRY)
4649 if (mmio_info_in_cache(vcpu, cr2, direct))
4652 er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
4657 case EMULATE_USER_EXIT:
4658 ++vcpu->stat.mmio_exits;
4666 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
4668 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
4670 vcpu->arch.mmu.invlpg(vcpu, gva);
4671 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
4672 ++vcpu->stat.invlpg;
4674 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
4676 void kvm_enable_tdp(void)
4680 EXPORT_SYMBOL_GPL(kvm_enable_tdp);
4682 void kvm_disable_tdp(void)
4684 tdp_enabled = false;
4686 EXPORT_SYMBOL_GPL(kvm_disable_tdp);
4688 static void free_mmu_pages(struct kvm_vcpu *vcpu)
4690 free_page((unsigned long)vcpu->arch.mmu.pae_root);
4691 if (vcpu->arch.mmu.lm_root != NULL)
4692 free_page((unsigned long)vcpu->arch.mmu.lm_root);
4695 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
4701 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
4702 * Therefore we need to allocate shadow page tables in the first
4703 * 4GB of memory, which happens to fit the DMA32 zone.
4705 page = alloc_page(GFP_KERNEL | __GFP_DMA32);
4709 vcpu->arch.mmu.pae_root = page_address(page);
4710 for (i = 0; i < 4; ++i)
4711 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
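/*
 * The four pae_root entries are used as the shadow PDPTEs when the guest
 * runs with 32-bit or PAE paging; each entry covers 1 GiB of the guest's
 * 4 GiB address space, which is why exactly four slots are initialized here.
 */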
4716 int kvm_mmu_create(struct kvm_vcpu *vcpu)
4718 vcpu->arch.walk_mmu = &vcpu->arch.mmu;
4719 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
4720 vcpu->arch.mmu.translate_gpa = translate_gpa;
4721 vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
4723 return alloc_mmu_pages(vcpu);
4726 void kvm_mmu_setup(struct kvm_vcpu *vcpu)
4728 MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
4733 void kvm_mmu_init_vm(struct kvm *kvm)
4735 struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
4737 node->track_write = kvm_mmu_pte_write;
4738 kvm_page_track_register_notifier(kvm, node);
4741 void kvm_mmu_uninit_vm(struct kvm *kvm)
4743 struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
4745 kvm_page_track_unregister_notifier(kvm, node);
4748 /* The return value indicates whether a TLB flush on all vcpus is needed. */
4749 typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
4751 /* The caller should hold mmu-lock before calling this function. */
4752 static __always_inline bool
4753 slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
4754 slot_level_handler fn, int start_level, int end_level,
4755 gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
4757 struct slot_rmap_walk_iterator iterator;
4760 for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
4761 end_gfn, &iterator) {
4763 flush |= fn(kvm, iterator.rmap);
4765 if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
4766 if (flush && lock_flush_tlb) {
4767 kvm_flush_remote_tlbs(kvm);
4770 cond_resched_lock(&kvm->mmu_lock);
4774 if (flush && lock_flush_tlb) {
4775 kvm_flush_remote_tlbs(kvm);
4782 static __always_inline bool
4783 slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
4784 slot_level_handler fn, int start_level, int end_level,
4785 bool lock_flush_tlb)
4787 return slot_handle_level_range(kvm, memslot, fn, start_level,
4788 end_level, memslot->base_gfn,
4789 memslot->base_gfn + memslot->npages - 1,
4793 static __always_inline bool
4794 slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
4795 slot_level_handler fn, bool lock_flush_tlb)
4797 return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
4798 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
4801 static __always_inline bool
4802 slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
4803 slot_level_handler fn, bool lock_flush_tlb)
4805 return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL + 1,
4806 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
4809 static __always_inline bool
4810 slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
4811 slot_level_handler fn, bool lock_flush_tlb)
4813 return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
4814 PT_PAGE_TABLE_LEVEL, lock_flush_tlb);
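/*
 * Illustrative sketch of the calling convention: slot_rmap_write_protect()
 * below is a real handler; a hypothetical handler that merely treats "any
 * spte present" as "TLB flush requested" could be plugged in the same way:
 *
 *	static bool rmap_has_sptes(struct kvm *kvm,
 *				   struct kvm_rmap_head *rmap_head)
 *	{
 *		return !!rmap_head->val;
 *	}
 *
 *	spin_lock(&kvm->mmu_lock);
 *	flush = slot_handle_leaf(kvm, memslot, rmap_has_sptes, false);
 *	spin_unlock(&kvm->mmu_lock);
 */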
4817 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
4819 struct kvm_memslots *slots;
4820 struct kvm_memory_slot *memslot;
4823 spin_lock(&kvm->mmu_lock);
4824 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
4825 slots = __kvm_memslots(kvm, i);
4826 kvm_for_each_memslot(memslot, slots) {
4829 start = max(gfn_start, memslot->base_gfn);
4830 end = min(gfn_end, memslot->base_gfn + memslot->npages);
4834 slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
4835 PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
4836 start, end - 1, true);
4840 spin_unlock(&kvm->mmu_lock);
4843 static bool slot_rmap_write_protect(struct kvm *kvm,
4844 struct kvm_rmap_head *rmap_head)
4846 return __rmap_write_protect(kvm, rmap_head, false);
4849 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
4850 struct kvm_memory_slot *memslot)
4854 spin_lock(&kvm->mmu_lock);
4855 flush = slot_handle_all_level(kvm, memslot, slot_rmap_write_protect,
4857 spin_unlock(&kvm->mmu_lock);
4860 * kvm_mmu_slot_remove_write_access() and kvm_vm_ioctl_get_dirty_log(),
4861 * which flush the TLB outside of mmu-lock, must be serialized by
4862 * kvm->slots_lock; otherwise a TLB flush could be missed.
4864 lockdep_assert_held(&kvm->slots_lock);
4867 * We can flush all the TLBs out of the mmu lock without TLB
4868 * corruption, since we only change sptes from writable to
4869 * read-only; the only case we need to care about is therefore a
4870 * present spte changing into another present spte (changing a
4871 * spte from present to non-present flushes the TLBs immediately).
4872 * In other words, the only case that matters is mmu_spte_update(),
4873 * which checks SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE
4874 * instead of PT_WRITABLE_MASK and therefore no longer depends
4875 * on PT_WRITABLE_MASK.
4878 kvm_flush_remote_tlbs(kvm);
4881 static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
4882 struct kvm_rmap_head *rmap_head)
4885 struct rmap_iterator iter;
4886 int need_tlb_flush = 0;
4888 struct kvm_mmu_page *sp;
4891 for_each_rmap_spte(rmap_head, &iter, sptep) {
4892 sp = page_header(__pa(sptep));
4893 pfn = spte_to_pfn(*sptep);
4896 * We cannot do huge page mapping for indirect shadow pages,
4897 * which are found on the last rmap (level = 1) when not using
4898 * tdp; such shadow pages are kept in sync with the guest page
4899 * table, and the guest page table uses 4K page size mappings
4900 * when the indirect sp has level = 1.
4902 if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
4903 !kvm_is_zone_device_pfn(pfn) &&
4904 PageTransCompoundMap(pfn_to_page(pfn))) {
4905 drop_spte(kvm, sptep);
4911 return need_tlb_flush;
4914 void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
4915 const struct kvm_memory_slot *memslot)
4917 /* FIXME: const-ify all uses of struct kvm_memory_slot. */
4918 spin_lock(&kvm->mmu_lock);
4919 slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot,
4920 kvm_mmu_zap_collapsible_spte, true);
4921 spin_unlock(&kvm->mmu_lock);
4924 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
4925 struct kvm_memory_slot *memslot)
4929 spin_lock(&kvm->mmu_lock);
4930 flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
4931 spin_unlock(&kvm->mmu_lock);
4933 lockdep_assert_held(&kvm->slots_lock);
4936 * It's also safe to flush TLBs out of the mmu lock here, as this function
4937 * is currently only used for dirty logging, in which case flushing the TLB
4938 * out of the mmu lock also guarantees no dirty pages will be lost in the dirty bitmap.
4942 kvm_flush_remote_tlbs(kvm);
4944 EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);
4946 void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
4947 struct kvm_memory_slot *memslot)
4951 spin_lock(&kvm->mmu_lock);
4952 flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect,
4954 spin_unlock(&kvm->mmu_lock);
4956 /* see kvm_mmu_slot_remove_write_access */
4957 lockdep_assert_held(&kvm->slots_lock);
4960 kvm_flush_remote_tlbs(kvm);
4962 EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);
4964 void kvm_mmu_slot_set_dirty(struct kvm *kvm,
4965 struct kvm_memory_slot *memslot)
4969 spin_lock(&kvm->mmu_lock);
4970 flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false);
4971 spin_unlock(&kvm->mmu_lock);
4973 lockdep_assert_held(&kvm->slots_lock);
4975 /* see kvm_mmu_slot_leaf_clear_dirty */
4977 kvm_flush_remote_tlbs(kvm);
4979 EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
4981 #define BATCH_ZAP_PAGES 10
4982 static void kvm_zap_obsolete_pages(struct kvm *kvm)
4984 struct kvm_mmu_page *sp, *node;
4988 list_for_each_entry_safe_reverse(sp, node,
4989 &kvm->arch.active_mmu_pages, link) {
4993 * No obsolete pages exist before a newly created page, since
4994 * active_mmu_pages is a FIFO list.
4996 if (!is_obsolete_sp(kvm, sp))
5000 * Since we walk the list in reverse and invalid pages are
5001 * moved to its head, skipping invalid pages helps us avoid
5002 * walking the list forever.
5004 if (sp->role.invalid)
5008 * There is no need to flush the TLB since we only zap sps with an
5009 * invalid generation number.
5011 if (batch >= BATCH_ZAP_PAGES &&
5012 cond_resched_lock(&kvm->mmu_lock)) {
5017 ret = kvm_mmu_prepare_zap_page(kvm, sp,
5018 &kvm->arch.zapped_obsolete_pages);
5026 * The TLB should be flushed before freeing the page tables, since a
5027 * lockless walk may still be using the pages.
5029 kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
5033 * Fast-invalidate all shadow pages, using a lock-break technique
5034 * to zap obsolete pages.
5036 * This is required when a memslot is being deleted or the VM is being
5037 * destroyed; in these cases we must ensure that, once this function
5038 * returns, the KVM MMU no longer uses any resources of the slot being
5039 * deleted (or of any slot, for VM destruction).
5041 void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm)
5043 spin_lock(&kvm->mmu_lock);
5044 trace_kvm_mmu_invalidate_zap_all_pages(kvm);
5045 kvm->arch.mmu_valid_gen++;
5048 * Notify all vcpus to reload their shadow page tables
5049 * and flush their TLBs. All vcpus will then switch to the new
5050 * shadow page tables with the new mmu_valid_gen.
5052 * Note: this should be done while holding mmu-lock;
5053 * otherwise a vcpu could purge its shadow pages
5054 * but miss the TLB flush.
5056 kvm_reload_remote_mmus(kvm);
5058 kvm_zap_obsolete_pages(kvm);
5059 spin_unlock(&kvm->mmu_lock);
5062 static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
5064 return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
5067 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots)
5070 * The very rare case: if the generation number wraps around,
5071 * zap all shadow pages.
5073 if (unlikely((slots->generation & MMIO_GEN_MASK) == 0)) {
5074 printk_ratelimited(KERN_DEBUG "kvm: zapping shadow pages for mmio generation wraparound\n");
5075 kvm_mmu_invalidate_zap_all_pages(kvm);
5079 static unsigned long
5080 mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
5083 int nr_to_scan = sc->nr_to_scan;
5084 unsigned long freed = 0;
5086 mutex_lock(&kvm_lock);
5088 list_for_each_entry(kvm, &vm_list, vm_list) {
5090 LIST_HEAD(invalid_list);
5093 * Never scan more than sc->nr_to_scan VM instances.
5094 * In practice this condition is never hit, since we do not try
5095 * to shrink more than one VM and it is very unlikely to see
5096 * !n_used_mmu_pages that many times.
5101 * n_used_mmu_pages is accessed without holding kvm->mmu_lock here.
5102 * We may erroneously skip a VM instance, but we do not want to
5103 * shrink a VM that has only just started to populate its MMU anyway.
5106 if (!kvm->arch.n_used_mmu_pages &&
5107 !kvm_has_zapped_obsolete_pages(kvm))
5110 idx = srcu_read_lock(&kvm->srcu);
5111 spin_lock(&kvm->mmu_lock);
5113 if (kvm_has_zapped_obsolete_pages(kvm)) {
5114 kvm_mmu_commit_zap_page(kvm,
5115 &kvm->arch.zapped_obsolete_pages);
5119 if (prepare_zap_oldest_mmu_page(kvm, &invalid_list))
5121 kvm_mmu_commit_zap_page(kvm, &invalid_list);
5124 spin_unlock(&kvm->mmu_lock);
5125 srcu_read_unlock(&kvm->srcu, idx);
5128 * unfair on small ones
5129 * per-vm shrinkers cry out
5130 * sadness comes quickly
5132 list_move_tail(&kvm->vm_list, &vm_list);
5136 mutex_unlock(&kvm_lock);
5140 static unsigned long
5141 mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
5143 return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
5146 static struct shrinker mmu_shrinker = {
5147 .count_objects = mmu_shrink_count,
5148 .scan_objects = mmu_shrink_scan,
5149 .seeks = DEFAULT_SEEKS * 10,
5152 static void mmu_destroy_caches(void)
5154 if (pte_list_desc_cache)
5155 kmem_cache_destroy(pte_list_desc_cache);
5156 if (mmu_page_header_cache)
5157 kmem_cache_destroy(mmu_page_header_cache);
5160 static bool get_nx_auto_mode(void)
5162 /* Return true when the CPU has the bug and mitigations are on */
5163 return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
5166 static void __set_nx_huge_pages(bool val)
5168 nx_huge_pages = itlb_multihit_kvm_mitigation = val;
5171 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
5173 bool old_val = nx_huge_pages;
5176 /* In "auto" mode deploy workaround only if CPU has the bug. */
5177 if (sysfs_streq(val, "off"))
5179 else if (sysfs_streq(val, "force"))
5181 else if (sysfs_streq(val, "auto"))
5182 new_val = get_nx_auto_mode();
5183 else if (strtobool(val, &new_val) < 0)
5186 __set_nx_huge_pages(new_val);
5188 if (new_val != old_val) {
5192 mutex_lock(&kvm_lock);
5194 list_for_each_entry(kvm, &vm_list, vm_list) {
5195 idx = srcu_read_lock(&kvm->srcu);
5196 kvm_mmu_invalidate_zap_all_pages(kvm);
5197 srcu_read_unlock(&kvm->srcu, idx);
5199 wake_up_process(kvm->arch.nx_lpage_recovery_thread);
5201 mutex_unlock(&kvm_lock);
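/*
 * Usage note: nx_huge_pages can typically be changed at run time by writing
 * "off", "force", "auto" or a plain boolean to
 * /sys/module/kvm/parameters/nx_huge_pages.  As the code above shows,
 * flipping the value zaps all shadow pages of every VM so that huge pages
 * are rebuilt (or split) under the new setting, and wakes each VM's NX huge
 * page recovery thread.
 */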
5207 int kvm_mmu_module_init(void)
5209 if (nx_huge_pages == -1)
5210 __set_nx_huge_pages(get_nx_auto_mode());
5212 pte_list_desc_cache = kmem_cache_create("pte_list_desc",
5213 sizeof(struct pte_list_desc),
5214 0, SLAB_ACCOUNT, NULL);
5215 if (!pte_list_desc_cache)
5218 mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
5219 sizeof(struct kvm_mmu_page),
5220 0, SLAB_ACCOUNT, NULL);
5221 if (!mmu_page_header_cache)
5224 if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
5227 register_shrinker(&mmu_shrinker);
5232 mmu_destroy_caches();
5237 * Calculate the number of mmu pages needed for kvm.
5239 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
5241 unsigned int nr_mmu_pages;
5242 unsigned int nr_pages = 0;
5243 struct kvm_memslots *slots;
5244 struct kvm_memory_slot *memslot;
5247 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
5248 slots = __kvm_memslots(kvm, i);
5250 kvm_for_each_memslot(memslot, slots)
5251 nr_pages += memslot->npages;
5254 nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
5255 nr_mmu_pages = max(nr_mmu_pages,
5256 (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
5258 return nr_mmu_pages;
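/*
 * Worked example (assuming KVM_PERMILLE_MMU_PAGES is 20 and
 * KVM_MIN_ALLOC_MMU_PAGES is 64): a guest with 4 GiB of memory spans
 * 1,048,576 4 KiB pages, so nr_mmu_pages = 1048576 * 20 / 1000 = 20971,
 * i.e. about 2% of the guest's page count; a tiny 8 MiB guest (2048 pages)
 * would compute 40 and be raised to the 64-page minimum.
 */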
5261 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
5263 kvm_mmu_unload(vcpu);
5264 free_mmu_pages(vcpu);
5265 mmu_free_memory_caches(vcpu);
5268 void kvm_mmu_module_exit(void)
5270 mmu_destroy_caches();
5271 percpu_counter_destroy(&kvm_total_used_mmu_pages);
5272 unregister_shrinker(&mmu_shrinker);
5273 mmu_audit_disable();
5276 static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp)
5278 unsigned int old_val;
5281 old_val = nx_huge_pages_recovery_ratio;
5282 err = param_set_uint(val, kp);
5286 if (READ_ONCE(nx_huge_pages) &&
5287 !old_val && nx_huge_pages_recovery_ratio) {
5290 mutex_lock(&kvm_lock);
5292 list_for_each_entry(kvm, &vm_list, vm_list)
5293 wake_up_process(kvm->arch.nx_lpage_recovery_thread);
5295 mutex_unlock(&kvm_lock);
5301 static void kvm_recover_nx_lpages(struct kvm *kvm)
5304 struct kvm_mmu_page *sp;
5306 LIST_HEAD(invalid_list);
5309 rcu_idx = srcu_read_lock(&kvm->srcu);
5310 spin_lock(&kvm->mmu_lock);
5312 ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
5313 to_zap = ratio ? DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0;
5314 while (to_zap && !list_empty(&kvm->arch.lpage_disallowed_mmu_pages)) {
5316 * We use a separate list instead of just using active_mmu_pages
5317 * because the number of lpage_disallowed pages is expected to
5318 * be relatively small compared to the total.
5320 sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages,
5321 struct kvm_mmu_page,
5322 lpage_disallowed_link);
5323 WARN_ON_ONCE(!sp->lpage_disallowed);
5324 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
5325 WARN_ON_ONCE(sp->lpage_disallowed);
5327 if (!--to_zap || need_resched() || spin_needbreak(&kvm->mmu_lock)) {
5328 kvm_mmu_commit_zap_page(kvm, &invalid_list);
5330 cond_resched_lock(&kvm->mmu_lock);
5333 kvm_mmu_commit_zap_page(kvm, &invalid_list);
5335 spin_unlock(&kvm->mmu_lock);
5336 srcu_read_unlock(&kvm->srcu, rcu_idx);
5339 static long get_nx_lpage_recovery_timeout(u64 start_time)
5341 return READ_ONCE(nx_huge_pages) && READ_ONCE(nx_huge_pages_recovery_ratio)
5342 ? start_time + 60 * HZ - get_jiffies_64()
5343 : MAX_SCHEDULE_TIMEOUT;
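/*
 * Worked example: with nx_huge_pages enabled and
 * nx_huge_pages_recovery_ratio == 60, the worker below wakes roughly every
 * 60 * HZ jiffies (about once a minute) and zaps
 * DIV_ROUND_UP(nx_lpage_splits, 60) shadow pages, i.e. about 1/60th of the
 * NX-split huge pages per pass, so a steady population is recovered in
 * roughly an hour.  A ratio of 0 disables recovery and the worker sleeps
 * with MAX_SCHEDULE_TIMEOUT.
 */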
5346 static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
5349 long remaining_time;
5352 start_time = get_jiffies_64();
5353 remaining_time = get_nx_lpage_recovery_timeout(start_time);
5355 set_current_state(TASK_INTERRUPTIBLE);
5356 while (!kthread_should_stop() && remaining_time > 0) {
5357 schedule_timeout(remaining_time);
5358 remaining_time = get_nx_lpage_recovery_timeout(start_time);
5359 set_current_state(TASK_INTERRUPTIBLE);
5362 set_current_state(TASK_RUNNING);
5364 if (kthread_should_stop())
5367 kvm_recover_nx_lpages(kvm);
5371 int kvm_mmu_post_init_vm(struct kvm *kvm)
5375 err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0,
5376 "kvm-nx-lpage-recovery",
5377 &kvm->arch.nx_lpage_recovery_thread);
5379 kthread_unpark(kvm->arch.nx_lpage_recovery_thread);
5384 void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
5386 if (kvm->arch.nx_lpage_recovery_thread)
5387 kthread_stop(kvm->arch.nx_lpage_recovery_thread);