// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * Macros and functions to access KVM PTEs (also known as SPTEs)
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2020 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_host.h>
#include "mmu.h"
#include "mmu_internal.h"
#include "x86.h"
#include "spte.h"

#include <asm/e820/api.h>
#include <asm/memtype.h>
#include <asm/vmx.h>

bool __read_mostly enable_mmio_caching = true;
module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);

u64 __read_mostly shadow_host_writable_mask;
u64 __read_mostly shadow_mmu_writable_mask;
u64 __read_mostly shadow_nx_mask;
u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
u64 __read_mostly shadow_user_mask;
u64 __read_mostly shadow_accessed_mask;
u64 __read_mostly shadow_dirty_mask;
u64 __read_mostly shadow_mmio_value;
u64 __read_mostly shadow_mmio_mask;
u64 __read_mostly shadow_mmio_access_mask;
u64 __read_mostly shadow_present_mask;
u64 __read_mostly shadow_me_value;
u64 __read_mostly shadow_me_mask;
u64 __read_mostly shadow_acc_track_mask;

u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;

u8 __read_mostly shadow_phys_bits;
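
/*
 * Spread the memslot generation across the MMIO SPTE's low and high
 * generation bit fields so that a stale MMIO SPTE can be detected after
 * the memslots change.
 */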
static u64 generation_mmio_spte_mask(u64 gen)
{
	u64 mask;

	WARN_ON(gen & ~MMIO_SPTE_GEN_MASK);
	mask = (gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK;
	mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK;
	return mask;
}
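
/*
 * Build an MMIO SPTE: a non-present SPTE that caches the GFN, the allowed
 * access bits, and the current memslot generation so MMIO faults can be
 * handled without a full memslot walk.
 */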
u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
{
	u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
	u64 spte = generation_mmio_spte_mask(gen);
	u64 gpa = gfn << PAGE_SHIFT;

	WARN_ON_ONCE(!shadow_mmio_value);

	access &= shadow_mmio_access_mask;
	spte |= shadow_mmio_value | access;
	spte |= gpa | shadow_nonpresent_or_rsvd_mask;
	spte |= (gpa & shadow_nonpresent_or_rsvd_mask)
		<< SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;

	return spte;
}

static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
{
	if (pfn_valid(pfn))
		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
			/*
			 * Some reserved pages, such as those from NVDIMM
			 * DAX devices, are not meant for MMIO and can be
			 * mapped with a cached memory type for better
			 * performance.  The check above, however, would
			 * misclassify them as MMIO and cause KVM to map
			 * them with the UC memory type, hurting performance.
			 * Therefore, also check the host memory type and
			 * only treat UC/UC-/WC pages as MMIO.
			 */
			(!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn));

	return !e820__mapped_raw_any(pfn_to_hpa(pfn),
				     pfn_to_hpa(pfn + 1) - 1,
				     E820_TYPE_RAM);
}

/*
 * Returns true if the SPTE has bits that may be set without holding mmu_lock.
 * The caller is responsible for checking if the SPTE is shadow-present, and
 * for determining whether or not the caller cares about non-leaf SPTEs.
 */
bool spte_has_volatile_bits(u64 spte)
{
	/*
	 * Always update the SPTE atomically if it can be updated without
	 * holding mmu_lock: this ensures the Dirty bit is not lost and that
	 * is_writable_pte() stays stable so a TLB flush is not missed.
	 */
	if (!is_writable_pte(spte) && is_mmu_writable_spte(spte))
		return true;

	if (is_access_track_spte(spte))
		return true;

	if (spte_ad_enabled(spte)) {
		if (!(spte & shadow_accessed_mask) ||
		    (is_writable_pte(spte) && !(spte & shadow_dirty_mask)))
			return true;
	}

	return false;
}
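
/*
 * Construct a leaf SPTE for @gfn -> @pfn with the permissions in
 * @pte_access.  The new SPTE is returned via @new_spte; the return value
 * is true if the GFN had to be write-protected instead of made writable.
 */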
bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
	       const struct kvm_memory_slot *slot,
	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
	       u64 old_spte, bool prefetch, bool can_unsync,
	       bool host_writable, u64 *new_spte)
{
	int level = sp->role.level;
	u64 spte = SPTE_MMU_PRESENT_MASK;
	bool wrprot = false;

	if (sp->role.ad_disabled)
		spte |= SPTE_TDP_AD_DISABLED_MASK;
	else if (kvm_mmu_page_ad_need_write_protect(sp))
		spte |= SPTE_TDP_AD_WRPROT_ONLY_MASK;

	/*
	 * For the EPT case, shadow_present_mask is 0 if hardware
	 * supports exec-only page table entries.  In that case,
	 * ACC_USER_MASK and shadow_user_mask are used to represent
	 * read access. See FNAME(gpte_access) in paging_tmpl.h.
	 */
	spte |= shadow_present_mask;
	if (!prefetch)
		spte |= spte_shadow_accessed_mask(spte);

	if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) &&
	    is_nx_huge_page_enabled()) {
		pte_access &= ~ACC_EXEC_MASK;
	}

	if (pte_access & ACC_EXEC_MASK)
		spte |= shadow_x_mask;
	else
		spte |= shadow_nx_mask;

	if (pte_access & ACC_USER_MASK)
		spte |= shadow_user_mask;

	if (level > PG_LEVEL_4K)
		spte |= PT_PAGE_SIZE_MASK;
	if (tdp_enabled)
		spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
			kvm_is_mmio_pfn(pfn));

	if (host_writable)
		spte |= shadow_host_writable_mask;
	else
		pte_access &= ~ACC_WRITE_MASK;

	if (shadow_me_value && !kvm_is_mmio_pfn(pfn))
		spte |= shadow_me_value;

	spte |= (u64)pfn << PAGE_SHIFT;

	if (pte_access & ACC_WRITE_MASK) {
		spte |= PT_WRITABLE_MASK | shadow_mmu_writable_mask;

		/*
		 * Optimization: for pte sync, if the spte was writable the
		 * hash lookup is unnecessary (and expensive).  Write
		 * protection is the responsibility of kvm_mmu_get_page /
		 * kvm_mmu_sync_roots.  The same reasoning applies to dirty
		 * page accounting.
		 */
		if (is_writable_pte(old_spte))
			goto out;

		/*
		 * Unsync shadow pages that are reachable by the new, writable
		 * SPTE.  Write-protect the SPTE if the page can't be unsync'd,
		 * e.g. it's write-tracked (upper-level SPs) or has one or more
		 * shadow pages and unsync'ing pages is not allowed.
		 */
		if (mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn, can_unsync, prefetch)) {
			pgprintk("%s: found shadow page for %llx, marking ro\n",
				 __func__, gfn);
			wrprot = true;
			pte_access &= ~ACC_WRITE_MASK;
			spte &= ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
		}
	}

	if (pte_access & ACC_WRITE_MASK)
		spte |= spte_shadow_dirty_mask(spte);

out:
	if (prefetch)
		spte = mark_spte_for_access_track(spte);

	WARN_ONCE(is_rsvd_spte(&vcpu->arch.mmu->shadow_zero_check, spte, level),
		  "spte = 0x%llx, level = %d, rsvd bits = 0x%llx", spte, level,
		  get_rsvd_bits(&vcpu->arch.mmu->shadow_zero_check, spte, level));

	if ((spte & PT_WRITABLE_MASK) && kvm_slot_dirty_track_enabled(slot)) {
		/* Enforced by kvm_mmu_hugepage_adjust. */
		WARN_ON(level > PG_LEVEL_4K);
		mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);
	}

	*new_spte = spte;
	return wrprot;
}
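
/*
 * Make an existing SPTE executable: clear the NX mask and set the X mask,
 * restoring and then re-applying access-track state so it is not lost in
 * the process.
 */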
static u64 make_spte_executable(u64 spte)
{
	bool is_access_track = is_access_track_spte(spte);

	if (is_access_track)
		spte = restore_acc_track_spte(spte);

	spte &= ~shadow_nx_mask;
	spte |= shadow_x_mask;

	if (is_access_track)
		spte = mark_spte_for_access_track(spte);
	return spte;
}

/*
 * Construct an SPTE that maps a sub-page of the given huge page SPTE where
 * `index` identifies which sub-page.
 *
 * This is used during huge page splitting to build the SPTEs that make up the
 * new lower level page table.
 */
u64 make_huge_page_split_spte(u64 huge_spte, int huge_level, int index)
{
	u64 child_spte;
	int child_level;

	if (WARN_ON_ONCE(!is_shadow_present_pte(huge_spte)))
		return 0;
	if (WARN_ON_ONCE(!is_large_pte(huge_spte)))
		return 0;

	child_spte = huge_spte;
	child_level = huge_level - 1;

	/*
	 * The child_spte already has the base address of the huge page being
	 * split, so we just have to OR in the offset to the page at the next
	 * lower level for the given index.
	 */
	child_spte |= (index * KVM_PAGES_PER_HPAGE(child_level)) << PAGE_SHIFT;

	if (child_level == PG_LEVEL_4K) {
		child_spte &= ~PT_PAGE_SIZE_MASK;
		/*
		 * When splitting to a 4K page, mark the page executable as the
		 * NX hugepage mitigation no longer applies.
		 */
		if (is_nx_huge_page_enabled())
			child_spte = make_spte_executable(child_spte);
	}

	return child_spte;
}
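
/*
 * Construct a non-leaf SPTE that points at @child_pt.  Non-leaf SPTEs are
 * created with full RWX permissions; access is restricted at the leaf level.
 */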
u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled)
{
	u64 spte = SPTE_MMU_PRESENT_MASK;

	spte |= __pa(child_pt) | shadow_present_mask | PT_WRITABLE_MASK |
		shadow_user_mask | shadow_x_mask | shadow_me_value;

	if (ad_disabled)
		spte |= SPTE_TDP_AD_DISABLED_MASK;
	else
		spte |= shadow_accessed_mask;
	return spte;
}
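
/*
 * Build the replacement SPTE for the mmu_notifier change_pte path: keep the
 * old SPTE's attributes but point at the new PFN, drop all writability, and
 * mark the SPTE for access tracking.
 */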
u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn)
{
	u64 new_spte;

	new_spte = old_spte & ~PT64_BASE_ADDR_MASK;
	new_spte |= (u64)new_pfn << PAGE_SHIFT;

	new_spte &= ~PT_WRITABLE_MASK;
	new_spte &= ~shadow_host_writable_mask;
	new_spte &= ~shadow_mmu_writable_mask;

	new_spte = mark_spte_for_access_track(new_spte);
	return new_spte;
}
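
/*
 * Turn an SPTE into an access-tracked SPTE.  With A/D bits enabled this only
 * clears the Accessed bit; otherwise the bits covered by
 * shadow_acc_track_mask are saved in the SPTE's saved-bits area and then
 * cleared so that the next access faults and can be recorded.
 */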
u64 mark_spte_for_access_track(u64 spte)
{
	if (spte_ad_enabled(spte))
		return spte & ~shadow_accessed_mask;

	if (is_access_track_spte(spte))
		return spte;

	check_spte_writable_invariants(spte);

	WARN_ONCE(spte & (SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
			  SHADOW_ACC_TRACK_SAVED_BITS_SHIFT),
		  "kvm: Access Tracking saved bit locations are not zero\n");

	spte |= (spte & SHADOW_ACC_TRACK_SAVED_BITS_MASK) <<
		SHADOW_ACC_TRACK_SAVED_BITS_SHIFT;
	spte &= ~shadow_acc_track_mask;

	return spte;
}
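
/*
 * Install the value/mask pair used to tag MMIO SPTEs.  The value must be
 * matched exactly by the mask and must not collide with the L1TF mitigation
 * bits or with REMOVED_SPTE; otherwise MMIO caching is disabled.
 */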
void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
{
	BUG_ON((u64)(unsigned)access_mask != access_mask);
	WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);

	if (!enable_mmio_caching)
		mmio_value = 0;

	/*
	 * Disable MMIO caching if the MMIO value collides with the bits that
	 * are used to hold the relocated GFN when the L1TF mitigation is
	 * enabled.  This should never fire as there is no known hardware that
	 * can trigger this condition, e.g. SME/SEV CPUs that require a custom
	 * MMIO value are not susceptible to L1TF.
	 */
	if (WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask <<
				  SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)))
		mmio_value = 0;

	/*
	 * The masked MMIO value must obviously match itself and a removed SPTE
	 * must not get a false positive.  Removed SPTEs and MMIO SPTEs should
	 * never collide as MMIO must set some RWX bits, and removed SPTEs must
	 * not set any RWX bits.
	 */
	if (WARN_ON((mmio_value & mmio_mask) != mmio_value) ||
	    WARN_ON(mmio_value && (REMOVED_SPTE & mmio_mask) == mmio_value))
		mmio_value = 0;

	if (!mmio_value)
		enable_mmio_caching = false;

	shadow_mmio_value = mmio_value;
	shadow_mmio_mask = mmio_mask;
	shadow_mmio_access_mask = access_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
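
/*
 * Set the host memory encryption bits (e.g. the SME/SEV C-bit) that KVM
 * applies to SPTEs: shadow_me_value is ORed into new SPTEs and must be a
 * subset of shadow_me_mask.
 */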
void kvm_mmu_set_me_spte_mask(u64 me_value, u64 me_mask)
{
	/* shadow_me_value must be a subset of shadow_me_mask */
	if (WARN_ON(me_value & ~me_mask))
		me_value = me_mask = 0;

	shadow_me_value = me_value;
	shadow_me_mask = me_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_me_spte_mask);
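
/*
 * Configure the SPTE masks for VMX with EPT, where the permission and
 * accessed/dirty bit layout differs from legacy x86 paging.
 */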
void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
{
	shadow_user_mask	= VMX_EPT_READABLE_MASK;
	shadow_accessed_mask	= has_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull;
	shadow_dirty_mask	= has_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull;
	shadow_nx_mask		= 0ull;
	shadow_x_mask		= VMX_EPT_EXECUTABLE_MASK;
	shadow_present_mask	= has_exec_only ? 0ull : VMX_EPT_READABLE_MASK;
	shadow_acc_track_mask	= VMX_EPT_RWX_MASK;
	shadow_host_writable_mask = EPT_SPTE_HOST_WRITABLE;
	shadow_mmu_writable_mask  = EPT_SPTE_MMU_WRITABLE;

	/*
	 * EPT Misconfigurations are generated if the value of bits 2:0
	 * of an EPT paging-structure entry is 110b (write/execute).
	 */
	kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE,
				   VMX_EPT_RWX_MASK, 0);
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_ept_masks);
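
/*
 * Reset all SPTE masks to the defaults used for legacy (non-EPT) paging and
 * recompute the L1TF mitigation and MMIO masks from the host's physical
 * address width.
 */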
void kvm_mmu_reset_all_pte_masks(void)
{
	u8 low_phys_bits;
	u64 mask;

	shadow_phys_bits = kvm_get_shadow_phys_bits();

	/*
	 * If the CPU has 46 or fewer physical address bits, then set an
	 * appropriate mask to guard against L1TF attacks.  Otherwise, it is
	 * assumed that the CPU is not vulnerable to L1TF.
	 *
	 * Some Intel CPUs address the L1 cache using more PA bits than are
	 * reported by CPUID.  Use the PA width of the L1 cache when possible
	 * to achieve more effective mitigation, e.g. if system RAM overlaps
	 * the most significant bits of legal physical address space.
	 */
	shadow_nonpresent_or_rsvd_mask = 0;
	low_phys_bits = boot_cpu_data.x86_phys_bits;
	if (boot_cpu_has_bug(X86_BUG_L1TF) &&
	    !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
			  52 - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)) {
		low_phys_bits = boot_cpu_data.x86_cache_bits
			- SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
		shadow_nonpresent_or_rsvd_mask =
			rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
	}

	shadow_nonpresent_or_rsvd_lower_gfn_mask =
		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);

	shadow_user_mask	= PT_USER_MASK;
	shadow_accessed_mask	= PT_ACCESSED_MASK;
	shadow_dirty_mask	= PT_DIRTY_MASK;
	shadow_nx_mask		= PT64_NX_MASK;
	shadow_x_mask		= 0;
	shadow_present_mask	= PT_PRESENT_MASK;
	shadow_acc_track_mask	= 0;
	shadow_me_mask		= 0;
	shadow_me_value		= 0;

	shadow_host_writable_mask = DEFAULT_SPTE_HOST_WRITABLE;
	shadow_mmu_writable_mask  = DEFAULT_SPTE_MMU_WRITABLE;

	/*
	 * Set a reserved PA bit in MMIO SPTEs to generate page faults with
	 * PFEC.RSVD=1 on MMIO accesses.  64-bit PTEs (PAE, x86-64, and EPT
	 * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports
	 * 52-bit physical addresses then there are no reserved PA bits in the
	 * PTEs and so the reserved PA approach must be disabled.
	 */
	if (shadow_phys_bits < 52)
		mask = BIT_ULL(51) | PT_PRESENT_MASK;
	else
		mask = 0;

	kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
}