#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/nospec-branch.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
#include <asm/kaiser.h>

/*
 * TLB flushing, formerly SMP-only
 *     including cross-CPU TLB flushing.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway).
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 * More scalable flush, from Andi Kleen
 *
 * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

/*
 * Use bit 0 to mangle the TIF_SPEC_IB state into the mm pointer which is
 * stored in cpu_tlbstate.last_user_mm_ibpb.
 */
#define LAST_USER_MM_IBPB	0x1UL

atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);

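/*
 * Arguments for the flush IPI: the mm being flushed (NULL means the flush
 * applies regardless of the active mm) and the virtual address range to
 * flush, as consumed by flush_tlb_func() and do_kernel_range_flush() below.
 */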
struct flush_tlb_info {
        struct mm_struct *flush_mm;
        unsigned long flush_start;
        unsigned long flush_end;
};

static void load_new_mm_cr3(pgd_t *pgdir)
{
        unsigned long new_mm_cr3 = __pa(pgdir);

        if (kaiser_enabled) {
                /*
                 * We reuse the same PCID for different tasks, so we must
                 * flush all the entries for the PCID out when we change tasks.
                 * Flush KERN below, flush USER when returning to userspace in
                 * kaiser's SWITCH_USER_CR3 (_SWITCH_TO_USER_CR3) macro.
                 *
                 * invpcid_flush_single_context(X86_CR3_PCID_ASID_USER) could
                 * do it here, but can only be used if X86_FEATURE_INVPCID is
                 * available - and many machines support pcid without invpcid.
                 *
                 * If X86_CR3_PCID_KERN_FLUSH actually added something, then it
                 * would be needed in the write_cr3() below - if PCIDs enabled.
                 */
                BUILD_BUG_ON(X86_CR3_PCID_KERN_FLUSH);
                kaiser_flush_tlb_on_return_to_user();
        }

        /*
         * Caution: many callers of this function expect
         * that load_cr3() is serializing and orders TLB
         * fills with respect to the mm_cpumask writes.
         */
        write_cr3(new_mm_cr3);
}

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
        struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
                BUG();
        if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
                cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
                load_new_mm_cr3(swapper_pg_dir);
                /*
                 * This gets called in the idle path where RCU
                 * functions differently.  Tracing normally
                 * uses RCU, so we have to call the tracepoint
                 * specially here.
                 */
                trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
        }
}
EXPORT_SYMBOL_GPL(leave_mm);

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
               struct task_struct *tsk)
{
        unsigned long flags;

        local_irq_save(flags);
        switch_mm_irqs_off(prev, next, tsk);
        local_irq_restore(flags);
}

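/*
 * mm_struct allocations come from the slab and are at least word-aligned,
 * so bit 0 of the mm pointer is always zero and is free to carry the
 * TIF_SPEC_IB state.  For example (illustrative values): with TIF_SPEC_IB
 * set, an mm pointer of 0x...5000 is mangled to 0x...5001, so two tasks
 * sharing an mm but differing in TIF_SPEC_IB still compare as different
 * in cond_ibpb().
 */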
static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
{
        unsigned long next_tif = task_thread_info(next)->flags;
        unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB;

        return (unsigned long)next->mm | ibpb;
}

static void cond_ibpb(struct task_struct *next)
{
        if (!next || !next->mm)
                return;

        /*
         * Both the conditional and the always IBPB mode use the mm
         * pointer to avoid the IBPB when switching between tasks of the
         * same process. Using the mm pointer instead of mm->context.ctx_id
         * opens a hypothetical hole vs. mm_struct reuse, which is more or
         * less impossible to control by an attacker. Aside from that, it
         * would only affect the first schedule, so the theoretically
         * exposed data is not really interesting.
         */
        if (static_branch_likely(&switch_mm_cond_ibpb)) {
                unsigned long prev_mm, next_mm;

                /*
                 * This is a bit more complex than the always mode because
                 * it has to handle two cases:
                 *
                 * 1) Switch from a user space task (potential attacker)
                 *    which has TIF_SPEC_IB set to a user space task
                 *    (potential victim) which has TIF_SPEC_IB not set.
                 *
                 * 2) Switch from a user space task (potential attacker)
                 *    which has TIF_SPEC_IB not set to a user space task
                 *    (potential victim) which has TIF_SPEC_IB set.
                 *
                 * This could be done by unconditionally issuing IBPB when
                 * a task which has TIF_SPEC_IB set is either scheduled in
                 * or out. Though that results in two flushes when:
                 *
                 * - the same user space task is scheduled out and later
                 *   scheduled in again and only a kernel thread ran in
                 *   between.
                 *
                 * - a user space task belonging to the same process is
                 *   scheduled in after a kernel thread ran in between
                 *
                 * - a user space task belonging to the same process is
                 *   scheduled in immediately.
                 *
                 * Optimize this with reasonably small overhead for the
                 * above cases. Mangle the TIF_SPEC_IB bit into the mm
                 * pointer of the incoming task which is stored in
                 * cpu_tlbstate.last_user_mm_ibpb for comparison.
                 */
                next_mm = mm_mangle_tif_spec_ib(next);
                prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_ibpb);

                /*
                 * Issue IBPB only if the mm's are different and one or
                 * both have the IBPB bit set.
                 */
                if (next_mm != prev_mm &&
                    (next_mm | prev_mm) & LAST_USER_MM_IBPB)
                        indirect_branch_prediction_barrier();

                this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, next_mm);
        }

        if (static_branch_unlikely(&switch_mm_always_ibpb)) {
                /*
                 * Only flush when switching to a user space task with a
                 * different context than the user space task which ran
                 * last on this CPU.
                 */
                if (this_cpu_read(cpu_tlbstate.last_user_mm) != next->mm) {
                        indirect_branch_prediction_barrier();
                        this_cpu_write(cpu_tlbstate.last_user_mm, next->mm);
                }
        }
}

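/*
 * Core of the context switch: the caller must have disabled interrupts;
 * switch_mm() above wraps this in local_irq_save()/local_irq_restore().
 */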
void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                        struct task_struct *tsk)
{
        unsigned cpu = smp_processor_id();

        if (likely(prev != next)) {
                /*
                 * Avoid user/user BTB poisoning by flushing the branch
                 * predictor when switching between processes. This stops
                 * one process from doing Spectre-v2 attacks on another.
                 */
                cond_ibpb(tsk);

                if (IS_ENABLED(CONFIG_VMAP_STACK)) {
                        /*
                         * If our current stack is in vmalloc space and isn't
                         * mapped in the new pgd, we'll double-fault.  Forcibly
                         * map it.
                         */
                        unsigned int stack_pgd_index = pgd_index(current_stack_pointer);

                        pgd_t *pgd = next->pgd + stack_pgd_index;

                        if (unlikely(pgd_none(*pgd)))
                                set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
                }

                this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
                this_cpu_write(cpu_tlbstate.active_mm, next);

                cpumask_set_cpu(cpu, mm_cpumask(next));

                /*
                 * Re-load page tables.
                 *
                 * This logic has an ordering constraint:
                 *
                 *  CPU 0: Write to a PTE for 'next'
                 *  CPU 0: load bit 1 in mm_cpumask.  if nonzero, send IPI.
                 *  CPU 1: set bit 1 in next's mm_cpumask
                 *  CPU 1: load from the PTE that CPU 0 writes (implicit)
                 *
                 * We need to prevent an outcome in which CPU 1 observes
                 * the new PTE value and CPU 0 observes bit 1 clear in
                 * mm_cpumask.  (If that occurs, then the IPI will never
                 * be sent, and CPU 0's TLB will contain a stale entry.)
                 *
                 * The bad outcome can occur if either CPU's load is
                 * reordered before that CPU's store, so both CPUs must
                 * execute full barriers to prevent this from happening.
                 *
                 * Thus, switch_mm needs a full barrier between the
                 * store to mm_cpumask and any operation that could load
                 * from next->pgd.  TLB fills are special and can happen
                 * due to instruction fetches or for no reason at all,
                 * and neither LOCK nor MFENCE orders them.
                 * Fortunately, load_cr3() is serializing and gives the
                 * ordering guarantee we need.
                 */
                load_new_mm_cr3(next->pgd);

                trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

                /* Stop flush ipis for the previous mm */
                cpumask_clear_cpu(cpu, mm_cpumask(prev));

                /* Load per-mm CR4 state */
                load_mm_cr4(next);

#ifdef CONFIG_MODIFY_LDT_SYSCALL
                /*
                 * Load the LDT, if the LDT is different.
                 *
                 * It's possible that prev->context.ldt doesn't match
                 * the LDT register.  This can happen if leave_mm(prev)
                 * was called and then modify_ldt changed
                 * prev->context.ldt but suppressed an IPI to this CPU.
                 * In this case, prev->context.ldt != NULL, because we
                 * never set context.ldt to NULL while the mm still
                 * exists.  That means that next->context.ldt !=
                 * prev->context.ldt, because mms never share an LDT.
                 */
                if (unlikely(prev->context.ldt != next->context.ldt))
                        load_mm_ldt(next);
#endif
        } else {
                this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
                BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

                if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
                        /*
                         * On established mms, the mm_cpumask is only changed
                         * from irq context, from ptep_clear_flush() while in
                         * lazy tlb mode, and here. Irqs are blocked during
                         * schedule, protecting us from simultaneous changes.
                         */
                        cpumask_set_cpu(cpu, mm_cpumask(next));

                        /*
                         * We were in lazy tlb mode and leave_mm disabled
                         * tlb flush IPI delivery. We must reload CR3
                         * to make sure to use no freed page tables.
                         *
                         * As above, load_cr3() is serializing and orders TLB
                         * fills with respect to the mm_cpumask write.
                         */
                        load_new_mm_cr3(next->pgd);
                        trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
                        load_mm_cr4(next);
                        load_mm_ldt(next);
                }
        }
}

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) set cpu_tlbstate to TLBSTATE_OK
 *	Now the tlb flush NMI handler flush_tlb_func won't call leave_mm
 *	if cpu0 was in lazy tlb mode.
 * 1a2) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a4) change cr3.
 * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but flush_tlb_func ignores flush ipis for the wrong
 *	mm, and in the worst case we perform a superfluous tlb flush.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, ie. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, no
 * write/read ordering problems.
 */

/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
static void flush_tlb_func(void *info)
{
        struct flush_tlb_info *f = info;

        inc_irq_stat(irq_tlb_count);

        if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
                return;

        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
                if (f->flush_end == TLB_FLUSH_ALL) {
                        local_flush_tlb();
                        trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
                } else {
                        unsigned long addr;
                        unsigned long nr_pages =
                                (f->flush_end - f->flush_start) / PAGE_SIZE;
                        addr = f->flush_start;
                        while (addr < f->flush_end) {
                                __flush_tlb_single(addr);
                                addr += PAGE_SIZE;
                        }
                        trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
                }
        } else
                leave_mm(smp_processor_id());
}

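/*
 * Send the flush IPI to every CPU in 'cpumask'.  Passing the on-stack
 * 'info' is safe because smp_call_function_many() is invoked with
 * wait=1, so this function does not return until every target CPU has
 * finished running flush_tlb_func().
 */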
void native_flush_tlb_others(const struct cpumask *cpumask,
                                 struct mm_struct *mm, unsigned long start,
                                 unsigned long end)
{
        struct flush_tlb_info info;

        info.flush_mm = mm;
        info.flush_start = start;
        info.flush_end = end;

        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
        if (end == TLB_FLUSH_ALL)
                trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
        else
                trace_tlb_flush(TLB_REMOTE_SEND_IPI,
                                (end - start) >> PAGE_SHIFT);

        if (is_uv_system()) {
                unsigned int cpu;

                cpu = smp_processor_id();
                cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
                if (cpumask)
                        smp_call_function_many(cpumask, flush_tlb_func,
                                               &info, 1);
                return;
        }
        smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,000 ns.
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

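/*
 * Ranges of at most tlb_single_page_flush_ceiling pages are flushed with
 * per-page INVLPG; anything larger degrades to a full TLB flush
 * (TLB_FLUSH_ALL), both locally below and on remote CPUs.
 */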
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                                unsigned long end, unsigned long vmflag)
{
        unsigned long addr;
        /* do a global flush by default */
        unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

        preempt_disable();

        if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
                base_pages_to_flush = (end - start) >> PAGE_SHIFT;
        if (base_pages_to_flush > tlb_single_page_flush_ceiling)
                base_pages_to_flush = TLB_FLUSH_ALL;

        if (current->active_mm != mm) {
                /* Synchronize with switch_mm. */
                smp_mb();

                goto out;
        }

        if (!current->mm) {
                leave_mm(smp_processor_id());

                /* Synchronize with switch_mm. */
                smp_mb();

                goto out;
        }

        /*
         * Both branches below are implicit full barriers (MOV to CR or
         * INVLPG) that synchronize with switch_mm.
         */
        if (base_pages_to_flush == TLB_FLUSH_ALL) {
                count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
                local_flush_tlb();
        } else {
                /* flush the range one page at a time with 'invlpg' */
                for (addr = start; addr < end; addr += PAGE_SIZE) {
                        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
                        __flush_tlb_single(addr);
                }
        }
        trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
out:
        if (base_pages_to_flush == TLB_FLUSH_ALL) {
                start = 0UL;
                end = TLB_FLUSH_ALL;
        }
        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, start, end);
        preempt_enable();
}

static void do_flush_tlb_all(void *info)
{
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
        __flush_tlb_all();
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
                leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
        on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
        struct flush_tlb_info *f = info;
        unsigned long addr;

        /* flush the range one page at a time with 'invlpg' */
        for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
                __flush_tlb_single(addr);
}

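/*
 * Kernel mappings are shared by all CPUs, so a kernel-range flush must
 * reach every CPU via on_each_cpu(); there is no mm_cpumask to narrow
 * the target set as there is for user address spaces.
 */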
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        /* Balance as user space task's flush, a bit conservative */
        if (end == TLB_FLUSH_ALL ||
            (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
                on_each_cpu(do_flush_tlb_all, NULL, 1);
        } else {
                struct flush_tlb_info info;
                info.flush_start = start;
                info.flush_end = end;
                on_each_cpu(do_kernel_range_flush, &info, 1);
        }
}

static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        char buf[32];
        unsigned int len;

        len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling);
        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
                 const char __user *user_buf, size_t count, loff_t *ppos)
{
        char buf[32];
        ssize_t len;
        int ceiling;

        len = min(count, sizeof(buf) - 1);
        if (copy_from_user(buf, user_buf, len))
                return -EFAULT;

        buf[len] = '\0';
        if (kstrtoint(buf, 0, &ceiling))
                return -EINVAL;

        if (ceiling < 0)
                return -EINVAL;

        tlb_single_page_flush_ceiling = ceiling;
        return count;
}

static const struct file_operations fops_tlbflush = {
        .read = tlbflush_read_file,
        .write = tlbflush_write_file,
        .llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
        debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
                            arch_debugfs_dir, NULL, &fops_tlbflush);
        return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);

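/*
 * Example usage of the knob above (assuming debugfs is mounted at
 * /sys/kernel/debug; arch_debugfs_dir is its x86 subdirectory):
 *
 *   cat /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *   echo 48 > /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 */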