/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>

extern atomic64_t last_mm_ctx_id;

#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
}
#endif /* !CONFIG_PARAVIRT */
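/*
 * Note: when CONFIG_PARAVIRT is enabled, the real paravirt_activate_mm()
 * hook is provided by <asm/paravirt.h> (included above); the empty stub
 * here just lets activate_mm() below call it unconditionally on native
 * builds.
 */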
#ifdef CONFIG_PERF_EVENTS

extern struct static_key rdpmc_always_available;

static inline void load_mm_cr4(struct mm_struct *mm)
{
        if (static_key_false(&rdpmc_always_available) ||
            atomic_read(&mm->context.perf_rdpmc_allowed))
                cr4_set_bits(X86_CR4_PCE);
        else
                cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif
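/*
 * Background: CR4.PCE controls whether RDPMC may be executed outside
 * ring 0, so load_mm_cr4() decides per-mm whether user space gets direct
 * access to the performance counters.
 */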
#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
        /*
         * Xen requires page-aligned LDTs with special permissions.  This is
         * needed to prevent us from installing evil descriptors such as
         * call gates.  On native, we could merge the ldt_struct and LDT
         * allocations, but it's not worth trying to optimize.
         */
        struct desc_struct      *entries;
        unsigned int            nr_entries;

        /*
         * If PTI is in use, then the entries array is not mapped while we're
         * in user mode.  The whole array will be aliased at the address
         * given by ldt_slot_va(slot).  We use two slots so that we can
         * allocate and map, and enable a new LDT without invalidating the
         * mapping of an older, still-in-use LDT.
         *
         * slot will be -1 if this LDT doesn't have an alias mapping.
         */
        int                     slot;
};
/* This is a multiple of PAGE_SIZE. */
#define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE)

static inline void *ldt_slot_va(int slot)
{
#ifdef CONFIG_X86_64
        return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
#else
        BUG();
        return (void *)fix_to_virt(FIX_HOLE);
#endif
}
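/*
 * Rough layout sketch, assuming the usual LDT_ENTRIES == 8192 and
 * LDT_ENTRY_SIZE == 8: LDT_SLOT_STRIDE is 64 KiB, so
 *
 *      ldt_slot_va(0) == LDT_BASE_ADDR
 *      ldt_slot_va(1) == LDT_BASE_ADDR + 64 KiB
 *
 * which is what lets a new LDT be mapped and enabled in one slot while an
 * older, still-live LDT keeps its mapping in the other.
 */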
/*
 * Used for LDT copy/destruction.
 */
static inline void init_new_context_ldt(struct mm_struct *mm)
{
        mm->context.ldt = NULL;
        init_rwsem(&mm->context.ldt_usr_sem);
}
int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
void ldt_arch_exit_mmap(struct mm_struct *mm);
#else /* CONFIG_MODIFY_LDT_SYSCALL */
static inline void init_new_context_ldt(struct mm_struct *mm) { }
static inline int ldt_dup_context(struct mm_struct *oldmm,
                                  struct mm_struct *mm)
{
        return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) { }
static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
#endif
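/*
 * The out-of-line helpers declared above (ldt_dup_context(),
 * destroy_context_ldt(), ldt_arch_exit_mmap()) are implemented in
 * arch/x86/kernel/ldt.c.
 */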
static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        struct ldt_struct *ldt;

        /* READ_ONCE synchronizes with smp_store_release */
        ldt = READ_ONCE(mm->context.ldt);

        /*
         * Any change to mm->context.ldt is followed by an IPI to all
         * CPUs with the mm active.  The LDT will not be freed until
         * after the IPI is handled by all such CPUs.  This means that,
         * if the ldt_struct changes before we return, the values we see
         * will be safe, and the new values will be loaded before we run
         * any user code.
         *
         * NB: don't try to convert this to use RCU without extreme care.
         * We would still need IRQs off, because we don't want to change
         * the local LDT after an IPI loaded a newer value than the one
         * that we can see.
         */
        if (unlikely(ldt)) {
                if (static_cpu_has(X86_FEATURE_PTI)) {
                        if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) {
                                /*
                                 * Whoops -- either the new LDT isn't mapped
                                 * (if slot == -1) or is mapped into a bogus
                                 * slot (if slot > 1).
                                 */
                                clear_LDT();
                                return;
                        }
                        /*
                         * If page table isolation is enabled, ldt->entries
                         * will not be mapped in the userspace pagetables.
                         * Tell the CPU to access the LDT through the alias
                         * at ldt_slot_va(ldt->slot).
                         */
                        set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries);
                } else {
                        set_ldt(ldt->entries, ldt->nr_entries);
                }
        } else {
                clear_LDT();
        }
#else
        clear_LDT();
#endif
}
static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        /*
         * Load the LDT if either the old or new mm had an LDT.
         *
         * An mm will never go from having an LDT to not having an LDT.  Two
         * mms never share an LDT, so we don't gain anything by checking to
         * see whether the LDT changed.  There's also no guarantee that
         * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
         * then prev->context.ldt will also be non-NULL.
         *
         * If we really cared, we could optimize the case where prev == next
         * and we're exiting lazy mode.  Most of the time, if this happens,
         * we don't actually need to reload LDTR, but modify_ldt() is mostly
         * used by legacy code and emulators where we don't need this level
         * of performance.
         *
         * This uses | instead of || because it generates better code.
         */
        if (unlikely((unsigned long)prev->context.ldt |
                     (unsigned long)next->context.ldt))
                load_mm_ldt(next);
#endif

        DEBUG_LOCKS_WARN_ON(preemptible());
}
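/*
 * switch_ldt() is expected to run with preemption already disabled -- in
 * practice from the switch_mm_irqs_off() context-switch path -- which is
 * what the DEBUG_LOCKS_WARN_ON() above asserts.
 */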
void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);

/*
 * Init a new mm.  Used on mm copies, like at fork()
 * and on mm's that are brand-new, like at execve().
 */
static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
        mutex_init(&mm->context.lock);

        mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
        atomic64_set(&mm->context.tlb_gen, 0);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
        if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
                /* pkey 0 is the default and allocated implicitly */
                mm->context.pkey_allocation_map = 0x1;
                /* -1 means unallocated or invalid */
                mm->context.execute_only_pkey = -1;
        }
#endif
        init_new_context_ldt(mm);
        return 0;
}
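/*
 * On the two counters set up above, roughly: ctx_id comes from
 * last_mm_ctx_id and is never reused for the life of the kernel, so the TLB
 * code can tell "same mm seen again" apart from "different mm reusing this
 * ASID"; tlb_gen starts at zero and is bumped each time a flush of this
 * mm's TLB entries is requested.
 */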
static inline void destroy_context(struct mm_struct *mm)
{
        destroy_context_ldt(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                               struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)                 \
do {                                            \
        paravirt_activate_mm((prev), (next));   \
        switch_mm((prev), (next), NULL);        \
} while (0);

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)                  \
do {                                            \
        lazy_load_gs(0);                        \
} while (0)
#else
#define deactivate_mm(tsk, mm)                  \
do {                                            \
        load_gs_index(0);                       \
        loadsegment(fs, 0);                     \
} while (0)
#endif
static inline void arch_dup_pkeys(struct mm_struct *oldmm,
                                  struct mm_struct *mm)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
        if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
                return;

        /* Duplicate the oldmm pkey state in mm: */
        mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
        mm->context.execute_only_pkey   = oldmm->context.execute_only_pkey;
#endif
}

static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
        arch_dup_pkeys(oldmm, mm);
        paravirt_arch_dup_mmap(oldmm, mm);
        return ldt_dup_context(oldmm, mm);
}
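/*
 * arch_dup_mmap() is called from the generic fork path (dup_mmap()); of the
 * three steps above, only the LDT duplication can fail, and that error is
 * what gets propagated back to abort the fork.
 */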
static inline void arch_exit_mmap(struct mm_struct *mm)
{
        paravirt_arch_exit_mmap(mm);
        ldt_arch_exit_mmap(mm);
}

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
        return  !IS_ENABLED(CONFIG_IA32_EMULATION) ||
                !(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
        return false;
}
#endif
static inline void arch_bprm_mm_init(struct mm_struct *mm,
                struct vm_area_struct *vma)
{
        mpx_mm_init(mm);
}
static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
                              unsigned long start, unsigned long end)
{
        /*
         * mpx_notify_unmap() goes and reads a rarely-hot
         * cacheline in the mm_struct.  That can be expensive
         * enough to be seen in profiles.
         *
         * The mpx_notify_unmap() call and its contents have been
         * observed to affect munmap() performance on hardware
         * where MPX is not present.
         *
         * The unlikely() optimizes for the fast case: no MPX
         * in the CPU, or no MPX use in the process.  Even if
         * we get this wrong (in the unlikely event that MPX
         * is widely enabled on some system) the overhead of
         * MPX itself (reading bounds tables) is expected to
         * overwhelm the overhead of getting this unlikely()
         * consistently wrong.
         */
        if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
                mpx_notify_unmap(mm, vma, start, end);
}
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline int vma_pkey(struct vm_area_struct *vma)
{
        unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
                                      VM_PKEY_BIT2 | VM_PKEY_BIT3;

        return (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
}
#else
static inline int vma_pkey(struct vm_area_struct *vma)
{
        return 0;
}
#endif
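/*
 * Worked example (assuming the x86-64 values VM_PKEY_SHIFT == 32 and the
 * four VM_PKEY_BIT* flags occupying vm_flags bits 32-35): a VMA whose
 * vm_flags has bits 33 and 35 set carries pkey 0b1010 == 10, which is
 * exactly what the mask-and-shift above returns.
 */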
/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
        if (!current->mm)
                return true;
        /*
         * Should PKRU be enforced on the access to this VMA?  If
         * the VMA is from another process, then PKRU has no
         * relevance and should not be enforced.
         */
        if (current->mm != vma->vm_mm)
                return true;

        return false;
}
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
                bool write, bool execute, bool foreign)
{
        /* pkeys never affect instruction fetches */
        if (execute)
                return true;
        /* allow access if the VMA is not one from this process */
        if (foreign || vma_is_foreign(vma))
                return true;
        return __pkru_allows_pkey(vma_pkey(vma), write);
}
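/*
 * For the final check above: __pkru_allows_pkey() consults the two PKRU
 * bits for that key -- the access-disable bit always, and the write-disable
 * bit only when 'write' is set.
 */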
/*
 * This can be used from process context to figure out what the value of
 * CR3 is without needing to do a (slow) __read_cr3().
 *
 * It's intended to be used for code like KVM that sneakily changes CR3
 * and needs to restore it.  It needs to be used very carefully.
 */
static inline unsigned long __get_current_cr3_fast(void)
{
        unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
                this_cpu_read(cpu_tlbstate.loaded_mm_asid));

        /* For now, be very restrictive about when this can be called. */
        VM_WARN_ON(in_nmi() || preemptible());

        VM_BUG_ON(cr3 != __read_cr3());
        return cr3;
}
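/*
 * What build_cr3() recomputes here, roughly (see its definition in
 * <asm/tlbflush.h> for the authoritative version): the physical address of
 * loaded_mm's top-level page table OR'd with the PCID derived from
 * loaded_mm_asid when PCID is in use.  The VM_BUG_ON() above cross-checks
 * that recomputed value against the real CR3.
 */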
#endif /* _ASM_X86_MMU_CONTEXT_H */