// SPDX-License-Identifier: GPL-2.0-only

#ifndef KVM_X86_MMU_SPTE_H
#define KVM_X86_MMU_SPTE_H

#include "mmu_internal.h"

extern bool __read_mostly enable_mmio_caching;
/*
 * An MMU-present SPTE is backed by actual memory and may or may not be present
 * in hardware.  E.g. MMIO SPTEs are not considered present.  Use bit 11, as it
 * is ignored by all flavors of SPTEs and checking a low bit often generates
 * better code than checking a high bit, e.g. 56+.  MMU-present checks are
 * pervasive enough that the improved code generation is noticeable in KVM's
 * footprint.
 */
#define SPTE_MMU_PRESENT_MASK		BIT_ULL(11)
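/*
 * Codegen illustration, added here and not part of the original comment (the
 * exact instruction sequences are an assumption): testing bit 11 fits in a
 * 32-bit immediate, e.g. "testl $0x800, %eax", whereas testing bit 56 needs a
 * separate 64-bit constant, e.g. "movabs $0x100000000000000, %rcx" followed
 * by "test %rcx, %rax".
 */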
/*
 * TDP SPTEs (more specifically, EPT SPTEs) may not have A/D bits, and may also
 * be restricted to using write-protection (for L2 when CPU dirty logging, i.e.
 * PML, is enabled).  Use bits 52 and 53 to hold the type of A/D tracking that
 * must be employed for a given TDP SPTE.
 *
 * Note, the "enabled" mask must be '0', as bits 62:52 are _reserved_ for PAE
 * paging, including NPT PAE.  This scheme works because legacy shadow paging
 * is guaranteed to have A/D bits and write-protection is forced only for
 * TDP with CPU dirty logging (PML).  If NPT ever gains PML-like support, it
 * must be restricted to 64-bit KVM.
 */
#define SPTE_TDP_AD_SHIFT		52
#define SPTE_TDP_AD_MASK		(3ULL << SPTE_TDP_AD_SHIFT)
#define SPTE_TDP_AD_ENABLED_MASK	(0ULL << SPTE_TDP_AD_SHIFT)
#define SPTE_TDP_AD_DISABLED_MASK	(1ULL << SPTE_TDP_AD_SHIFT)
#define SPTE_TDP_AD_WRPROT_ONLY_MASK	(2ULL << SPTE_TDP_AD_SHIFT)
static_assert(SPTE_TDP_AD_ENABLED_MASK == 0);
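/*
 * Redundant sanity check, added for illustration (not in the original): the
 * A/D type bits live at 52:53 and thus cannot collide with the MMU-present
 * bit (bit 11).
 */
static_assert(!(SPTE_TDP_AD_MASK & SPTE_MMU_PRESENT_MASK));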
#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
#define PT64_BASE_ADDR_MASK (physical_mask & ~(u64)(PAGE_SIZE-1))
#else
#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#endif
#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \
			| shadow_x_mask | shadow_nx_mask | shadow_me_mask)

#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
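/*
 * Worked value, added for illustration (assumes PT_WRITABLE_MASK == BIT(1)
 * and PT_USER_MASK == BIT(2), per the x86 PTE layout): ACC_ALL == 0x7, i.e.
 * a fully permissive mapping requests execute, write, and user access.
 */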
/* The mask for the R/X bits in EPT PTEs */
#define PT64_EPT_READABLE_MASK		0x1ull
#define PT64_EPT_EXECUTABLE_MASK	0x4ull
#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_INDEX(address, level) \
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
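/*
 * Worked example, added for illustration (not in the original): with 4KiB
 * pages, PT64_LEVEL_SHIFT(1) == 12 and PT64_LEVEL_SHIFT(2) == 21, so for
 * address 0x40201000:
 *
 *	PT64_INDEX(0x40201000, 1) == (0x40201000 >> 12) & 511 == 1
 *	PT64_INDEX(0x40201000, 2) == (0x40201000 >> 21) & 511 == 1
 */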
/*
 * The mask/shift to use for saving the original R/X bits when marking the PTE
 * as not-present for access tracking purposes.  We do not save the W bit as the
 * PTEs being access tracked also need to be dirty tracked, so the W bit will be
 * restored only when a write is attempted to the page.  This mask obviously
 * must not overlap the A/D type mask.
 */
#define SHADOW_ACC_TRACK_SAVED_BITS_MASK (PT64_EPT_READABLE_MASK | \
					  PT64_EPT_EXECUTABLE_MASK)
#define SHADOW_ACC_TRACK_SAVED_BITS_SHIFT 54
#define SHADOW_ACC_TRACK_SAVED_MASK	(SHADOW_ACC_TRACK_SAVED_BITS_MASK << \
					 SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
static_assert(!(SPTE_TDP_AD_MASK & SHADOW_ACC_TRACK_SAVED_MASK));
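/*
 * Worked example, added for illustration (not in the original): the saved
 * R/X bits (0x5) are parked at SPTE bits 54 and 56, so an access-tracked
 * SPTE that was R+X has (0x5ULL << 54) == 0x0140000000000000 set.
 */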
/*
 * {DEFAULT,EPT}_SPTE_{HOST,MMU}_WRITABLE are used to keep track of why a given
 * SPTE is write-protected.  See is_writable_pte() for details.
 */

/* Bits 9 and 10 are ignored by all non-EPT PTEs. */
#define DEFAULT_SPTE_HOST_WRITABLE	BIT_ULL(9)
#define DEFAULT_SPTE_MMU_WRITABLE	BIT_ULL(10)
/*
 * Low ignored bits are at a premium for EPT, use high ignored bits, taking care
 * to not overlap the A/D type mask or the saved access bits of access-tracked
 * SPTEs when A/D bits are disabled.
 */
#define EPT_SPTE_HOST_WRITABLE		BIT_ULL(57)
#define EPT_SPTE_MMU_WRITABLE		BIT_ULL(58)

static_assert(!(EPT_SPTE_HOST_WRITABLE & SPTE_TDP_AD_MASK));
static_assert(!(EPT_SPTE_MMU_WRITABLE & SPTE_TDP_AD_MASK));
static_assert(!(EPT_SPTE_HOST_WRITABLE & SHADOW_ACC_TRACK_SAVED_MASK));
static_assert(!(EPT_SPTE_MMU_WRITABLE & SHADOW_ACC_TRACK_SAVED_MASK));

/* Defined only to keep the above static asserts readable. */
#undef SHADOW_ACC_TRACK_SAVED_MASK
/*
 * Due to limited space in PTEs, the MMIO generation is a 19-bit subset of
 * the memslots generation and is derived as follows:
 *
 * Bits 0-7 of the MMIO generation are propagated to spte bits 3-10
 * Bits 8-18 of the MMIO generation are propagated to spte bits 52-62
 *
 * The KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS flag is intentionally not included in
 * the MMIO generation number, as doing so would require stealing a bit from
 * the "real" generation number and thus effectively halve the maximum number
 * of MMIO generations that can be handled before encountering a wrap (which
 * requires a full MMU zap).  The flag is instead explicitly queried when
 * checking for MMIO spte cache hits.
 */
#define MMIO_SPTE_GEN_LOW_START		3
#define MMIO_SPTE_GEN_LOW_END		10

#define MMIO_SPTE_GEN_HIGH_START	52
#define MMIO_SPTE_GEN_HIGH_END		62

#define MMIO_SPTE_GEN_LOW_MASK		GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \
						    MMIO_SPTE_GEN_LOW_START)
#define MMIO_SPTE_GEN_HIGH_MASK		GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, \
						    MMIO_SPTE_GEN_HIGH_START)
static_assert(!(SPTE_MMU_PRESENT_MASK &
		(MMIO_SPTE_GEN_LOW_MASK | MMIO_SPTE_GEN_HIGH_MASK)));

#define MMIO_SPTE_GEN_LOW_BITS		(MMIO_SPTE_GEN_LOW_END - MMIO_SPTE_GEN_LOW_START + 1)
#define MMIO_SPTE_GEN_HIGH_BITS		(MMIO_SPTE_GEN_HIGH_END - MMIO_SPTE_GEN_HIGH_START + 1)

/* remember to adjust the comment above as well if you change these */
static_assert(MMIO_SPTE_GEN_LOW_BITS == 8 && MMIO_SPTE_GEN_HIGH_BITS == 11);

#define MMIO_SPTE_GEN_LOW_SHIFT		(MMIO_SPTE_GEN_LOW_START - 0)
#define MMIO_SPTE_GEN_HIGH_SHIFT	(MMIO_SPTE_GEN_HIGH_START - MMIO_SPTE_GEN_LOW_BITS)

#define MMIO_SPTE_GEN_MASK		GENMASK_ULL(MMIO_SPTE_GEN_LOW_BITS + MMIO_SPTE_GEN_HIGH_BITS - 1, 0)
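/*
 * Worked example and redundant check, added for illustration (not in the
 * original): generation bit 0 lands at spte bit 3 and generation bit 8 lands
 * at spte bit 52, e.g. gen = 0x401 sets spte bits 3 and 54.
 */
static_assert(MMIO_SPTE_GEN_LOW_SHIFT == 3 && MMIO_SPTE_GEN_HIGH_SHIFT == 44);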
extern u64 __read_mostly shadow_host_writable_mask;
extern u64 __read_mostly shadow_mmu_writable_mask;
extern u64 __read_mostly shadow_nx_mask;
extern u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
extern u64 __read_mostly shadow_user_mask;
extern u64 __read_mostly shadow_accessed_mask;
extern u64 __read_mostly shadow_dirty_mask;
extern u64 __read_mostly shadow_mmio_value;
extern u64 __read_mostly shadow_mmio_mask;
extern u64 __read_mostly shadow_mmio_access_mask;
extern u64 __read_mostly shadow_present_mask;
extern u64 __read_mostly shadow_me_value;
extern u64 __read_mostly shadow_me_mask;
/*
 * SPTEs in MMUs without A/D bits are marked with SPTE_TDP_AD_DISABLED_MASK;
 * shadow_acc_track_mask is the set of bits to be cleared in non-accessed
 * pages.
 */
extern u64 __read_mostly shadow_acc_track_mask;
/*
 * This mask must be set on all non-zero Non-Present or Reserved SPTEs in order
 * to guard against L1TF attacks.
 */
extern u64 __read_mostly shadow_nonpresent_or_rsvd_mask;

/*
 * The number of high-order 1 bits to use in the mask above.
 */
#define SHADOW_NONPRESENT_OR_RSVD_MASK_LEN 5
/*
 * If a thread running without exclusive control of the MMU lock must perform a
 * multi-part operation on an SPTE, it can set the SPTE to REMOVED_SPTE as a
 * non-present intermediate value.  Other threads which encounter this value
 * should not modify the SPTE.
 *
 * Use a semi-arbitrary value that doesn't set RWX bits, i.e. is not-present on
 * both AMD and Intel CPUs, and doesn't set PFN bits, i.e. doesn't create an
 * L1TF vulnerability.  Use only low bits to avoid 64-bit immediates.
 *
 * Only used by the TDP MMU.
 */
#define REMOVED_SPTE	0x5a0ULL

/* Removed SPTEs must not be misconstrued as shadow present PTEs. */
static_assert(!(REMOVED_SPTE & SPTE_MMU_PRESENT_MASK));

static inline bool is_removed_spte(u64 spte)
{
	return spte == REMOVED_SPTE;
}
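/*
 * Usage sketch, added for illustration (an assumed caller pattern, not code
 * from this file; RET_PF_RETRY comes from mmu_internal.h): a lock-free TDP
 * MMU walker backs off when it sees the sentinel, e.g.
 *
 *	if (is_removed_spte(READ_ONCE(*sptep)))
 *		return RET_PF_RETRY;
 */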
/*
 * In some cases, we need to preserve the GFN of a non-present or reserved
 * SPTE when we usurp the upper five bits of the physical address space to
 * defend against L1TF, e.g. for MMIO SPTEs.  To preserve the GFN, we'll
 * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask
 * left into the reserved bits, i.e. the GFN in the SPTE will be split into
 * high and low parts.  This mask covers the lower bits of the GFN.
 */
extern u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
static inline bool is_mmio_spte(u64 spte)
{
	return (spte & shadow_mmio_mask) == shadow_mmio_value &&
	       likely(enable_mmio_caching);
}

static inline bool is_shadow_present_pte(u64 pte)
{
	return !!(pte & SPTE_MMU_PRESENT_MASK);
}
/*
 * Returns true if A/D bits are supported in hardware and are enabled by KVM.
 * When enabled, KVM uses A/D bits for all non-nested MMUs.  Because L1 can
 * disable A/D bits in EPTP12, SP and SPTE variants are needed to handle the
 * scenario where KVM is using A/D bits for L1, but not L2.
 */
static inline bool kvm_ad_enabled(void)
{
	return !!shadow_accessed_mask;
}

static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
{
	return sp->role.ad_disabled;
}
static inline bool spte_ad_enabled(u64 spte)
{
	MMU_WARN_ON(!is_shadow_present_pte(spte));
	return (spte & SPTE_TDP_AD_MASK) != SPTE_TDP_AD_DISABLED_MASK;
}

static inline bool spte_ad_need_write_protect(u64 spte)
{
	MMU_WARN_ON(!is_shadow_present_pte(spte));
	/*
	 * This is benign for non-TDP SPTEs as SPTE_TDP_AD_ENABLED_MASK is '0',
	 * and non-TDP SPTEs will never set these bits.  Optimize for 64-bit
	 * TDP and do the A/D type check unconditionally.
	 */
	return (spte & SPTE_TDP_AD_MASK) != SPTE_TDP_AD_ENABLED_MASK;
}

static inline u64 spte_shadow_accessed_mask(u64 spte)
{
	MMU_WARN_ON(!is_shadow_present_pte(spte));
	return spte_ad_enabled(spte) ? shadow_accessed_mask : 0;
}

static inline u64 spte_shadow_dirty_mask(u64 spte)
{
	MMU_WARN_ON(!is_shadow_present_pte(spte));
	return spte_ad_enabled(spte) ? shadow_dirty_mask : 0;
}
static inline bool is_access_track_spte(u64 spte)
{
	return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) == 0;
}

static inline bool is_large_pte(u64 pte)
{
	return pte & PT_PAGE_SIZE_MASK;
}

static inline bool is_last_spte(u64 pte, int level)
{
	return (level == PG_LEVEL_4K) || is_large_pte(pte);
}

static inline bool is_executable_pte(u64 spte)
{
	return (spte & (shadow_x_mask | shadow_nx_mask)) == shadow_x_mask;
}

static inline kvm_pfn_t spte_to_pfn(u64 pte)
{
	return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
}
static inline bool is_accessed_spte(u64 spte)
{
	u64 accessed_mask = spte_shadow_accessed_mask(spte);

	return accessed_mask ? spte & accessed_mask
			     : !is_access_track_spte(spte);
}

static inline bool is_dirty_spte(u64 spte)
{
	u64 dirty_mask = spte_shadow_dirty_mask(spte);

	return dirty_mask ? spte & dirty_mask : spte & PT_WRITABLE_MASK;
}
static inline u64 get_rsvd_bits(struct rsvd_bits_validate *rsvd_check, u64 pte,
				int level)
{
	int bit7 = (pte >> 7) & 1;

	return rsvd_check->rsvd_bits_mask[bit7][level-1];
}

static inline bool __is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check,
				      u64 pte, int level)
{
	return pte & get_rsvd_bits(rsvd_check, pte, level);
}

static inline bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check,
				   u64 pte)
{
	return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f);
}

static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check,
					 u64 spte, int level)
{
	return __is_bad_mt_xwr(rsvd_check, spte) ||
	       __is_rsvd_bits_set(rsvd_check, spte, level);
}
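/*
 * Note, added for illustration (an assumption about the EPT encoding, not
 * from the original): the "pte & 0x3f" index covers the EPT XWR permissions
 * (bits 2:0) and memtype (bits 5:3), so bad_mt_xwr has one bit per possible
 * {memtype, XWR} combination.
 */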
/*
 * A shadow-present leaf SPTE may be non-writable for 3 possible reasons:
 *
 *  1. To intercept writes for dirty logging.  KVM write-protects huge pages
 *     so that they can be split down to the dirty logging granularity (4KiB)
 *     whenever the guest writes to them.  KVM also write-protects 4KiB pages
 *     so that writes can be recorded in the dirty log (e.g. if not using PML).
 *     SPTEs are write-protected for dirty logging during the VM-ioctls that
 *     enable dirty logging.
 *
 *  2. To intercept writes to guest page tables that KVM is shadowing.  When a
 *     guest writes to its page table the corresponding shadow page table will
 *     be marked "unsync".  That way KVM knows which shadow page tables need to
 *     be updated on the next TLB flush, INVLPG, etc. and which do not.
 *
 *  3. To prevent guest writes to read-only memory, such as for memory in a
 *     read-only memslot or guest memory backed by a read-only VMA.  Writes to
 *     such pages are disallowed entirely.
 *
 * To keep track of why a given SPTE is write-protected, KVM uses 2
 * software-only bits in the SPTE:
 *
 *  shadow_mmu_writable_mask, aka MMU-writable -
 *    Cleared on SPTEs that KVM is currently write-protecting for shadow paging
 *    purposes (case 2 above).
 *
 *  shadow_host_writable_mask, aka Host-writable -
 *    Cleared on SPTEs that are not host-writable (case 3 above).
 *
 * Note, not all possible combinations of PT_WRITABLE_MASK,
 * shadow_mmu_writable_mask, and shadow_host_writable_mask are valid.  A given
 * SPTE can be in only one of the following states, which map to the
 * aforementioned 3 cases:
 *
 *   shadow_host_writable_mask | shadow_mmu_writable_mask | PT_WRITABLE_MASK
 *   ------------------------- | ------------------------ | ----------------
 *   1                         | 1                        | 1       (writable)
 *   1                         | 1                        | 0       (case 1)
 *   1                         | 0                        | 0       (case 2)
 *   0                         | 0                        | 0       (case 3)
 *
 * The valid combinations of these bits are checked by
 * check_spte_writable_invariants() whenever an SPTE is modified.
 *
 * Clearing the MMU-writable bit is always done under the MMU lock and always
 * accompanied by a TLB flush before dropping the lock to avoid corrupting the
 * shadow page tables between vCPUs.  Write-protecting an SPTE for dirty logging
 * (which does not clear the MMU-writable bit), does not flush TLBs before
 * dropping the lock, as it only needs to synchronize guest writes with the
 * dirty bitmap.
 *
 * So there is a problem: clearing the MMU-writable bit can encounter a
 * write-protected SPTE while CPUs still have writable mappings for that SPTE
 * cached in their TLB.  To address this, KVM always flushes TLBs when
 * write-protecting SPTEs if the MMU-writable bit is set on the old SPTE.
 *
 * The Host-writable bit is not modified on present SPTEs, it is only set or
 * cleared when an SPTE is first faulted in from non-present and then remains
 * immutable.
 */
static inline bool is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

/* Note: spte must be a shadow-present leaf SPTE. */
static inline void check_spte_writable_invariants(u64 spte)
{
	if (spte & shadow_mmu_writable_mask)
		WARN_ONCE(!(spte & shadow_host_writable_mask),
			  "kvm: MMU-writable SPTE is not Host-writable: %llx",
			  spte);
	else
		WARN_ONCE(is_writable_pte(spte),
			  "kvm: Writable SPTE is not MMU-writable: %llx", spte);
}

static inline bool is_mmu_writable_spte(u64 spte)
{
	return spte & shadow_mmu_writable_mask;
}
static inline u64 get_mmio_spte_generation(u64 spte)
{
	u64 gen;

	gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_SHIFT;
	gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_SHIFT;
	return gen;
}
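/*
 * Worked example, added for illustration (not in the original): for
 * spte = (0x1ULL << 3) | (0x4ULL << 52), i.e. spte bits 3 and 54 set, the
 * low field yields 0x1 and the high field yields 0x400, so gen == 0x401,
 * the inverse of the packing described above.
 */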
bool spte_has_volatile_bits(u64 spte);

bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
	       const struct kvm_memory_slot *slot,
	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
	       u64 old_spte, bool prefetch, bool can_unsync,
	       bool host_writable, u64 *new_spte);
u64 make_huge_page_split_spte(u64 huge_spte, int huge_level, int index);
u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled);
u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access);
u64 mark_spte_for_access_track(u64 spte);
/* Restore an acc-track PTE back to a regular PTE */
static inline u64 restore_acc_track_spte(u64 spte)
{
	u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
			 & SHADOW_ACC_TRACK_SAVED_BITS_MASK;

	spte &= ~shadow_acc_track_mask;
	spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
		  SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
	spte |= saved_bits;

	return spte;
}
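/*
 * Usage note, added for illustration (an assumption about the callers, not
 * from this file): this is the inverse of mark_spte_for_access_track(), e.g.
 * the fast page fault path uses it to make an access-tracked SPTE present
 * again without acquiring the MMU lock:
 *
 *	new_spte = restore_acc_track_spte(old_spte);
 */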
u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn);

void kvm_mmu_reset_all_pte_masks(void);

#endif /* KVM_X86_MMU_SPTE_H */