#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
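
/*
 * Counter used to hand out a unique context.ctx_id to every mm; see
 * init_new_context() below.
 */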
extern atomic64_t last_mm_ctx_id;

#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;
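
/*
 * Decide whether user-space rdpmc should work while this mm is loaded:
 * set CR4.PCE if perf has allowed rdpmc for this mm (or rdpmc is always
 * available), clear it otherwise.
 */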
static inline void load_mm_cr4(struct mm_struct *mm)
{
	if (static_key_false(&rdpmc_always_available) ||
	    atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct *entries;
	int size;
};

/*
 * Used for LDT copy/destruction.
 */
int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
#else	/* CONFIG_MODIFY_LDT_SYSCALL */
static inline int init_new_context_ldt(struct task_struct *tsk,
				       struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) {}
#endif

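/*
 * Load this mm's LDT into the CPU, or clear the LDT if the mm has none.
 * Preemption must already be disabled; see the DEBUG_LOCKS_WARN_ON() at
 * the end.
 */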
static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	struct ldt_struct *ldt;

	/* lockless_dereference synchronizes with smp_store_release */
	ldt = lockless_dereference(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active.  The LDT will not be freed until
	 * after the IPI is handled by all such CPUs.  This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */

	if (unlikely(ldt))
		set_ldt(ldt->entries, ldt->size);
	else
		clear_LDT();
#else
	clear_LDT();
#endif

	DEBUG_LOCKS_WARN_ON(preemptible());
}

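/*
 * Called when the CPU starts running a kernel thread that borrows this
 * mm: drop from TLBSTATE_OK to TLBSTATE_LAZY so that TLB flush IPIs for
 * the mm can be handled lazily.
 */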
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
}

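/*
 * init_new_context()/destroy_context() run when an mm is created and
 * torn down: assign a fresh ctx_id and copy or free the LDT.
 */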
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
	return init_new_context_ldt(tsk, mm);
}
static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}

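/*
 * Both are implemented out of line in arch/x86/mm/tlb.c.  Defining
 * switch_mm_irqs_off to itself tells generic code that an irqs-off
 * variant is available.
 */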
extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)

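/*
 * Two flavours of deactivate_mm(): 32-bit only needs to clear %gs, while
 * 64-bit clears both the %fs and %gs selectors so stale values are not
 * carried across the switch.
 */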
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

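/*
 * mmap lifetime hooks: both simply forward to the paravirt layer so that
 * a hypervisor-aware guest (e.g. Xen) can track the mm's page tables;
 * they are no-ops on bare metal.
 */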
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	paravirt_arch_dup_mmap(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
}

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!config_enabled(CONFIG_IA32_EMULATION) ||
		!(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

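/*
 * Called from the exec path while the new mm is being set up; gives MPX
 * a chance to initialize its per-mm state.
 */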
static inline void arch_bprm_mm_init(struct mm_struct *mm,
		struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot
	 * cacheline in the mm_struct.  That can be expensive
	 * enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process.  Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}

#endif /* _ASM_X86_MMU_CONTEXT_H */