/*
 * TLB flush routines for radix kernels.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>

#include <asm/ppc-opcode.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/cputhreads.h>
#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2
/*
 * tlbiel instruction for radix, set invalidation
 * i.e., r=1 and is=01 or is=10 or is=11
 */
static inline void tlbiel_radix_set_isa300(unsigned int set, unsigned int is,
                                unsigned int pid,
                                unsigned int ric, unsigned int prs)
{
        unsigned long rb;
        unsigned long rs;

        rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
        rs = ((unsigned long)pid << PPC_BITLSHIFT(31));

        asm volatile(PPC_TLBIEL(%0, %1, %2, %3, 1)
                     : : "r"(rb), "r"(rs), "i"(ric), "i"(prs)
                     : "memory");
}
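/*
 * Worked example (illustrative): PPC_BITLSHIFT(n) is (63 - n), so the
 * fields above land at the IBM-numbered bit positions the ISA names.
 * With set = 1 and is = 3 (invalidate all entries):
 *
 *      rb = (1 << 12) | (3 << 10) = 0x1c00
 *      rs = (unsigned long)pid << 32
 */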
static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
{
        unsigned int set;

        asm volatile("ptesync": : :"memory");

        /*
         * Flush the first set of the TLB, and the entire Page Walk Cache
         * and partition table entries. Then flush the remaining sets of the
         * TLB.
         */
        tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 0);
        for (set = 1; set < num_sets; set++)
                tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 0);

        /* Do the same for process scoped entries. */
        tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 1);
        for (set = 1; set < num_sets; set++)
                tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 1);

        asm volatile("ptesync": : :"memory");
}
void radix__tlbiel_all(unsigned int action)
{
        unsigned int is;

        switch (action) {
        case TLB_INVAL_SCOPE_GLOBAL:
                is = 3;
                break;
        case TLB_INVAL_SCOPE_LPID:
                is = 2;
                break;
        default:
                BUG();
        }

        if (early_cpu_has_feature(CPU_FTR_ARCH_300))
                tlbiel_all_isa300(POWER9_TLB_SETS_RADIX, is);
        else
                WARN(1, "%s called on pre-POWER9 CPU\n", __func__);

        asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}
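/*
 * Typical use (illustrative; the call sites are not in this file):
 * bringing the local core's translation state to a clean slate, e.g.
 * around boot or kexec, would look like
 *
 *      radix__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
 *
 * which invalidates both partition and process scoped entries, the
 * page walk cache, and the ERAT on this core only.
 */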
static inline void __tlbiel_pid(unsigned long pid, int set,
                                unsigned long ric)
{
        unsigned long rb, rs, prs, r;

        rb = PPC_BIT(53); /* IS = 1 */
        rb |= set << PPC_BITLSHIFT(51);
        rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */

        asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        trace_tlbie(0, 1, rb, rs, ric, prs, r);
}
static inline void __tlbie_pid(unsigned long pid, unsigned long ric)
{
        unsigned long rb, rs, prs, r;

        rb = PPC_BIT(53); /* IS = 1 */
        rs = pid << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */

        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        trace_tlbie(0, 0, rb, rs, ric, prs, r);
}
static inline void __tlbiel_lpid(unsigned long lpid, int set,
                                unsigned long ric)
{
        unsigned long rb, rs, prs, r;

        rb = PPC_BIT(52); /* IS = 2 */
        rb |= set << PPC_BITLSHIFT(51);
        rs = 0;  /* LPID comes from LPIDR */
        prs = 0; /* partition scoped */
        r = 1;   /* radix format */

        asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        trace_tlbie(lpid, 1, rb, rs, ric, prs, r);
}
static inline void __tlbie_lpid(unsigned long lpid, unsigned long ric)
{
        unsigned long rb, rs, prs, r;

        rb = PPC_BIT(52); /* IS = 2 */
        rs = lpid;
        prs = 0; /* partition scoped */
        r = 1;   /* radix format */

        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
static inline void __tlbiel_lpid_guest(unsigned long lpid, int set,
                                unsigned long ric)
{
        unsigned long rb, rs, prs, r;

        rb = PPC_BIT(52); /* IS = 2 */
        rb |= set << PPC_BITLSHIFT(51);
        rs = 0;  /* LPID comes from LPIDR */
        prs = 1; /* process scoped */
        r = 1;   /* radix format */

        asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        trace_tlbie(lpid, 1, rb, rs, ric, prs, r);
}
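/*
 * Summary of the low-level helpers above, derived from their RB/PRS
 * settings (tlbiel is core-local, tlbie is broadcast):
 *
 *      __tlbiel_pid        local   IS=1 PRS=1  process scoped, by PID
 *      __tlbie_pid         global  IS=1 PRS=1  process scoped, by PID
 *      __tlbiel_lpid       local   IS=2 PRS=0  partition scoped, LPID from LPIDR
 *      __tlbie_lpid        global  IS=2 PRS=0  partition scoped, by LPID
 *      __tlbiel_lpid_guest local   IS=2 PRS=1  guest process scoped
 */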
static inline void __tlbiel_va(unsigned long va, unsigned long pid,
                               unsigned long ap, unsigned long ric)
{
        unsigned long rb, rs, prs, r;

        rb = va & ~(PPC_BITMASK(52, 63));
        rb |= ap << PPC_BITLSHIFT(58);
        rs = pid << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */

        asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        trace_tlbie(0, 1, rb, rs, ric, prs, r);
}
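/*
 * RB layout for the VA-based forms (illustrative): PPC_BITMASK(52, 63)
 * covers IBM bits 52-63, i.e. the low 12 bits, which are cleared from
 * the effective address, and the actual-page-size (AP) field goes in at
 * bit 5 (PPC_BITLSHIFT(58)). E.g. for va = 0x7fff12340000 with a 64K
 * page, ap = mmu_get_ap(MMU_PAGE_64K):
 *
 *      rb = 0x7fff12340000 | (ap << 5)
 */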
static inline void __tlbie_va(unsigned long va, unsigned long pid,
                              unsigned long ap, unsigned long ric)
{
        unsigned long rb, rs, prs, r;

        rb = va & ~(PPC_BITMASK(52, 63));
        rb |= ap << PPC_BITLSHIFT(58);
        rs = pid << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */

        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        trace_tlbie(0, 0, rb, rs, ric, prs, r);
}
static inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid,
                                   unsigned long ap, unsigned long ric)
{
        unsigned long rb, rs, prs, r;

        rb = va & ~(PPC_BITMASK(52, 63));
        rb |= ap << PPC_BITLSHIFT(58);
        rs = lpid;
        prs = 0; /* partition scoped */
        r = 1;   /* radix format */

        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
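/*
 * The fixup_tlbie_*() helpers below work around two POWER9 tlbie
 * errata, keyed off the CPU feature bits: with CPU_FTR_P9_TLBIE_ERAT_BUG
 * an extra invalidation targeting PID/LPID 0 is issued, and with
 * CPU_FTR_P9_TLBIE_STQ_BUG the preceding tlbie is simply repeated, each
 * behind its own ptesync. Callers run these after the real tlbie and
 * before the final eieio; tlbsync; ptesync sequence (see _tlbie_va()
 * and friends further down).
 */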
static inline void fixup_tlbie_va(unsigned long va, unsigned long pid,
                                  unsigned long ap)
{
        if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
                asm volatile("ptesync": : :"memory");
                __tlbie_va(va, 0, ap, RIC_FLUSH_TLB);
        }

        if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
                asm volatile("ptesync": : :"memory");
                __tlbie_va(va, pid, ap, RIC_FLUSH_TLB);
        }
}
static inline void fixup_tlbie_va_range(unsigned long va, unsigned long pid,
                                        unsigned long ap)
{
        if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
                asm volatile("ptesync": : :"memory");
                __tlbie_pid(0, RIC_FLUSH_TLB);
        }

        if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
                asm volatile("ptesync": : :"memory");
                __tlbie_va(va, pid, ap, RIC_FLUSH_TLB);
        }
}
static inline void fixup_tlbie_pid(unsigned long pid)
{
        /*
         * We can use any address for the invalidation, pick one which is
         * probably unused as an optimisation.
         */
        unsigned long va = ((1UL << 52) - 1);

        if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
                asm volatile("ptesync": : :"memory");
                __tlbie_pid(0, RIC_FLUSH_TLB);
        }

        if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
                asm volatile("ptesync": : :"memory");
                __tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
        }
}
static inline void fixup_tlbie_lpid_va(unsigned long va, unsigned long lpid,
                                       unsigned long ap)
{
        if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
                asm volatile("ptesync": : :"memory");
                __tlbie_lpid_va(va, 0, ap, RIC_FLUSH_TLB);
        }

        if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
                asm volatile("ptesync": : :"memory");
                __tlbie_lpid_va(va, lpid, ap, RIC_FLUSH_TLB);
        }
}
static inline void fixup_tlbie_lpid(unsigned long lpid)
{
        /*
         * We can use any address for the invalidation, pick one which is
         * probably unused as an optimisation.
         */
        unsigned long va = ((1UL << 52) - 1);

        if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
                asm volatile("ptesync": : :"memory");
                __tlbie_lpid(0, RIC_FLUSH_TLB);
        }

        if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
                asm volatile("ptesync": : :"memory");
                __tlbie_lpid_va(va, lpid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
        }
}
/*
 * We use 128 sets in radix mode and 256 sets in HPT mode.
 */
static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
        int set;

        asm volatile("ptesync": : :"memory");

        /*
         * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
         * also flush the entire Page Walk Cache.
         */
        __tlbiel_pid(pid, 0, ric);

        /* For PWC, only one flush is needed */
        if (ric == RIC_FLUSH_PWC) {
                asm volatile("ptesync": : :"memory");
                return;
        }

        /* For the remaining sets, just flush the TLB */
        for (set = 1; set < POWER9_TLB_SETS_RADIX; set++)
                __tlbiel_pid(pid, set, RIC_FLUSH_TLB);

        asm volatile("ptesync": : :"memory");
        asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}
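/*
 * Cost note (illustrative): with POWER9_TLB_SETS_RADIX = 128, a
 * RIC_FLUSH_TLB or RIC_FLUSH_ALL here expands to 128 tlbiel
 * instructions, one per congruence class, while RIC_FLUSH_PWC needs
 * only the single set-0 operation.
 */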
static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
        asm volatile("ptesync": : :"memory");

        /*
         * Workaround the fact that the "ric" argument to __tlbie_pid
         * must be a compile-time constant to match the "i" constraint
         * in the asm statement.
         */
        switch (ric) {
        case RIC_FLUSH_TLB:
                __tlbie_pid(pid, RIC_FLUSH_TLB);
                fixup_tlbie_pid(pid);
                break;
        case RIC_FLUSH_PWC:
                __tlbie_pid(pid, RIC_FLUSH_PWC);
                break;
        case RIC_FLUSH_ALL:
        default:
                __tlbie_pid(pid, RIC_FLUSH_ALL);
                fixup_tlbie_pid(pid);
        }
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
static inline void _tlbiel_lpid(unsigned long lpid, unsigned long ric)
{
        int set;

        VM_BUG_ON(mfspr(SPRN_LPID) != lpid);

        asm volatile("ptesync": : :"memory");

        /*
         * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
         * also flush the entire Page Walk Cache.
         */
        __tlbiel_lpid(lpid, 0, ric);

        /* For PWC, only one flush is needed */
        if (ric == RIC_FLUSH_PWC) {
                asm volatile("ptesync": : :"memory");
                return;
        }

        /* For the remaining sets, just flush the TLB */
        for (set = 1; set < POWER9_TLB_SETS_RADIX; set++)
                __tlbiel_lpid(lpid, set, RIC_FLUSH_TLB);

        asm volatile("ptesync": : :"memory");
        asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}
static inline void _tlbie_lpid(unsigned long lpid, unsigned long ric)
{
        asm volatile("ptesync": : :"memory");

        /*
         * Workaround the fact that the "ric" argument to __tlbie_lpid
         * must be a compile-time constant to match the "i" constraint
         * in the asm statement.
         */
        switch (ric) {
        case RIC_FLUSH_TLB:
                __tlbie_lpid(lpid, RIC_FLUSH_TLB);
                fixup_tlbie_lpid(lpid);
                break;
        case RIC_FLUSH_PWC:
                __tlbie_lpid(lpid, RIC_FLUSH_PWC);
                break;
        case RIC_FLUSH_ALL:
        default:
                __tlbie_lpid(lpid, RIC_FLUSH_ALL);
                fixup_tlbie_lpid(lpid);
        }
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
static inline void _tlbiel_lpid_guest(unsigned long lpid, unsigned long ric)
{
        int set;

        VM_BUG_ON(mfspr(SPRN_LPID) != lpid);

        asm volatile("ptesync": : :"memory");

        /*
         * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
         * also flush the entire Page Walk Cache.
         */
        __tlbiel_lpid_guest(lpid, 0, ric);

        /* For PWC, only one flush is needed */
        if (ric == RIC_FLUSH_PWC) {
                asm volatile("ptesync": : :"memory");
                return;
        }

        /* For the remaining sets, just flush the TLB */
        for (set = 1; set < POWER9_TLB_SETS_RADIX; set++)
                __tlbiel_lpid_guest(lpid, set, RIC_FLUSH_TLB);

        asm volatile("ptesync": : :"memory");
        asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
}
static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
                                     unsigned long pid, unsigned long page_size,
                                     unsigned long psize)
{
        unsigned long addr;
        unsigned long ap = mmu_get_ap(psize);

        for (addr = start; addr < end; addr += page_size)
                __tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
}
static inline void _tlbiel_va(unsigned long va, unsigned long pid,
                              unsigned long psize, unsigned long ric)
{
        unsigned long ap = mmu_get_ap(psize);

        asm volatile("ptesync": : :"memory");
        __tlbiel_va(va, pid, ap, ric);
        asm volatile("ptesync": : :"memory");
}
static inline void _tlbiel_va_range(unsigned long start, unsigned long end,
                                    unsigned long pid, unsigned long page_size,
                                    unsigned long psize, bool also_pwc)
{
        asm volatile("ptesync": : :"memory");
        if (also_pwc)
                __tlbiel_pid(pid, 0, RIC_FLUSH_PWC);
        __tlbiel_va_range(start, end, pid, page_size, psize);
        asm volatile("ptesync": : :"memory");
}
static inline void __tlbie_va_range(unsigned long start, unsigned long end,
                                    unsigned long pid, unsigned long page_size,
                                    unsigned long psize)
{
        unsigned long addr;
        unsigned long ap = mmu_get_ap(psize);

        for (addr = start; addr < end; addr += page_size)
                __tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);

        fixup_tlbie_va_range(addr - page_size, pid, ap);
}
static inline void _tlbie_va(unsigned long va, unsigned long pid,
                             unsigned long psize, unsigned long ric)
{
        unsigned long ap = mmu_get_ap(psize);

        asm volatile("ptesync": : :"memory");
        __tlbie_va(va, pid, ap, ric);
        fixup_tlbie_va(va, pid, ap);
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
static inline void _tlbie_lpid_va(unsigned long va, unsigned long lpid,
                                  unsigned long psize, unsigned long ric)
{
        unsigned long ap = mmu_get_ap(psize);

        asm volatile("ptesync": : :"memory");
        __tlbie_lpid_va(va, lpid, ap, ric);
        fixup_tlbie_lpid_va(va, lpid, ap);
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
static inline void _tlbie_va_range(unsigned long start, unsigned long end,
                                   unsigned long pid, unsigned long page_size,
                                   unsigned long psize, bool also_pwc)
{
        asm volatile("ptesync": : :"memory");
        if (also_pwc)
                __tlbie_pid(pid, RIC_FLUSH_PWC);
        __tlbie_va_range(start, end, pid, page_size, psize);
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
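/*
 * For example (illustrative), a generic-code flush_tlb_page(vma, addr)
 * reaches radix__flush_tlb_page() below via the radix_enabled() dispatch
 * in asm/book3s/64/tlbflush.h, which then picks a broadcast tlbie or a
 * local tlbiel based on mm_is_thread_local().
 */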
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
        unsigned long pid;

        preempt_disable();
        pid = mm->context.id;
        if (pid != MMU_NO_CONTEXT)
                _tlbiel_pid(pid, RIC_FLUSH_TLB);
        preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);
#ifndef CONFIG_SMP
void radix__local_flush_all_mm(struct mm_struct *mm)
{
        unsigned long pid;

        preempt_disable();
        pid = mm->context.id;
        if (pid != MMU_NO_CONTEXT)
                _tlbiel_pid(pid, RIC_FLUSH_ALL);
        preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_all_mm);
#endif /* CONFIG_SMP */
void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
                                       int psize)
{
        unsigned long pid;

        preempt_disable();
        pid = mm->context.id;
        if (pid != MMU_NO_CONTEXT)
                _tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
        preempt_enable();
}
void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
        /* need the return fix for nohash.c */
        if (is_vm_hugetlb_page(vma))
                return radix__local_flush_hugetlb_page(vma, vmaddr);
#endif
        radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);
static bool mm_is_singlethreaded(struct mm_struct *mm)
{
        if (atomic_read(&mm->context.copros) > 0)
                return false;
        if (atomic_read(&mm->mm_users) <= 1 && current->mm == mm)
                return true;
        return false;
}
static bool mm_needs_flush_escalation(struct mm_struct *mm)
{
        /*
         * The P9 nest MMU has issues with the page walk cache: it
         * caches PTEs and does not flush them properly when RIC = 0
         * for a PID/LPID invalidate.
         */
        if (atomic_read(&mm->context.copros) > 0)
                return true;
        return false;
}
#ifdef CONFIG_SMP
static void do_exit_flush_lazy_tlb(void *arg)
{
        struct mm_struct *mm = arg;
        unsigned long pid = mm->context.id;

        /*
         * A kthread could have done a mmget_not_zero() after the flushing CPU
         * checked mm_is_singlethreaded, and be in the process of
         * kthread_use_mm when interrupted here. In that case, current->mm will
         * be set to mm, because kthread_use_mm() setting ->mm and switching to
         * the mm is done with interrupts off.
         */
        if (current->mm == mm)
                return; /* Local CPU */

        if (current->active_mm == mm) {
                WARN_ON_ONCE(current->mm != NULL);
                /* Is a kernel thread and is using mm as the lazy tlb */
                mmgrab(&init_mm);
                current->active_mm = &init_mm;
                switch_mm_irqs_off(mm, &init_mm, current);
                mmdrop(mm);
        }

        atomic_dec(&mm->context.active_cpus);
        cpumask_clear_cpu(smp_processor_id(), mm_cpumask(mm));

        _tlbiel_pid(pid, RIC_FLUSH_ALL);
}
static void exit_flush_lazy_tlbs(struct mm_struct *mm)
{
        /*
         * Would be nice if this was async so it could be run in
         * parallel with our local flush, but generic code does not
         * give a good API for it. Could extend the generic code or
         * make a special powerpc IPI for flushing TLBs.
         * For now it's not too performance critical.
         */
        smp_call_function_many(mm_cpumask(mm), do_exit_flush_lazy_tlb,
                                (void *)mm, 1);
}
void radix__flush_tlb_mm(struct mm_struct *mm)
{
        unsigned long pid;

        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                return;

        preempt_disable();
        /*
         * Order loads of mm_cpumask vs previous stores to clear ptes before
         * the invalidate. See barrier in switch_mm_irqs_off
         */
        smp_mb();
        if (!mm_is_thread_local(mm)) {
                if (unlikely(mm_is_singlethreaded(mm))) {
                        exit_flush_lazy_tlbs(mm);
                        goto local;
                }

                if (mm_needs_flush_escalation(mm))
                        _tlbie_pid(pid, RIC_FLUSH_ALL);
                else
                        _tlbie_pid(pid, RIC_FLUSH_TLB);
        } else {
local:
                _tlbiel_pid(pid, RIC_FLUSH_TLB);
        }
        preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);
static void __flush_all_mm(struct mm_struct *mm, bool fullmm)
{
        unsigned long pid;

        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                return;

        preempt_disable();
        smp_mb(); /* see radix__flush_tlb_mm */
        if (!mm_is_thread_local(mm)) {
                if (unlikely(mm_is_singlethreaded(mm))) {
                        if (!fullmm) {
                                exit_flush_lazy_tlbs(mm);
                                goto local;
                        }
                }
                _tlbie_pid(pid, RIC_FLUSH_ALL);
        } else {
local:
                _tlbiel_pid(pid, RIC_FLUSH_ALL);
        }
        preempt_enable();
}

void radix__flush_all_mm(struct mm_struct *mm)
{
        __flush_all_mm(mm, false);
}
EXPORT_SYMBOL(radix__flush_all_mm);
void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
        tlb->need_flush_all = 1;
}
EXPORT_SYMBOL(radix__flush_tlb_pwc);
void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
                                 int psize)
{
        unsigned long pid;

        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                return;

        preempt_disable();
        smp_mb(); /* see radix__flush_tlb_mm */
        if (!mm_is_thread_local(mm)) {
                if (unlikely(mm_is_singlethreaded(mm))) {
                        exit_flush_lazy_tlbs(mm);
                        goto local;
                }
                _tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
        } else {
local:
                _tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
        }
        preempt_enable();
}
void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
        if (is_vm_hugetlb_page(vma))
                return radix__flush_hugetlb_page(vma, vmaddr);
#endif
        radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__flush_tlb_page);
#else /* CONFIG_SMP */
#define radix__flush_all_mm radix__local_flush_all_mm
#endif /* CONFIG_SMP */
void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        _tlbie_pid(0, RIC_FLUSH_ALL);
}
EXPORT_SYMBOL(radix__flush_tlb_kernel_range);
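/*
 * Note (illustrative): kernel translations are installed under PID 0
 * in the radix model, which is why a single RIC_FLUSH_ALL of PID 0
 * above covers any kernel range and the start/end arguments can be
 * ignored.
 */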
#define TLB_FLUSH_ALL -1UL

/*
 * Number of pages above which we invalidate the entire PID rather than
 * flush individual pages, for local and global flushes respectively.
 *
 * tlbie goes out to the interconnect and individual ops are more costly.
 * It also does not iterate over sets like the local tlbiel variant when
 * invalidating a full PID, so it has a far lower threshold to change from
 * individual page flushes to full-pid flushes.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
static unsigned long tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2;
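/*
 * Worked example (illustrative): with the defaults above, a global
 * flush of 34 or more pages collapses into a single full-PID tlbie,
 * while a local flush keeps doing per-page tlbiel up to 256 pages
 * (POWER9_TLB_SETS_RADIX * 2), since a local full-PID flush already
 * costs 128 tlbiel operations on its own.
 */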
static inline void __radix__flush_tlb_range(struct mm_struct *mm,
                                        unsigned long start, unsigned long end,
                                        bool flush_all_sizes)
{
        unsigned long pid;
        unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift;
        unsigned long page_size = 1UL << page_shift;
        unsigned long nr_pages = (end - start) >> page_shift;
        bool local, full;

        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                return;

        preempt_disable();
        smp_mb(); /* see radix__flush_tlb_mm */
        if (!mm_is_thread_local(mm)) {
                if (unlikely(mm_is_singlethreaded(mm))) {
                        if (end != TLB_FLUSH_ALL) {
                                exit_flush_lazy_tlbs(mm);
                                goto is_local;
                        }
                }
                local = false;
                full = (end == TLB_FLUSH_ALL ||
                                nr_pages > tlb_single_page_flush_ceiling);
        } else {
is_local:
                local = true;
                full = (end == TLB_FLUSH_ALL ||
                                nr_pages > tlb_local_single_page_flush_ceiling);
        }

        if (full) {
                if (local) {
                        _tlbiel_pid(pid, RIC_FLUSH_TLB);
                } else {
                        if (mm_needs_flush_escalation(mm))
                                _tlbie_pid(pid, RIC_FLUSH_ALL);
                        else
                                _tlbie_pid(pid, RIC_FLUSH_TLB);
                }
        } else {
                bool hflush = flush_all_sizes;
                bool gflush = flush_all_sizes;
                unsigned long hstart, hend;
                unsigned long gstart, gend;

                if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
                        hflush = true;

                if (hflush) {
                        hstart = (start + PMD_SIZE - 1) & PMD_MASK;
                        hend = end & PMD_MASK;
                        if (hstart == hend)
                                hflush = false;
                }

                if (gflush) {
                        gstart = (start + PUD_SIZE - 1) & PUD_MASK;
                        gend = end & PUD_MASK;
                        if (gstart == gend)
                                gflush = false;
                }

                if (local) {
                        asm volatile("ptesync": : :"memory");
                        __tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
                        if (hflush)
                                __tlbiel_va_range(hstart, hend, pid,
                                                PMD_SIZE, MMU_PAGE_2M);
                        if (gflush)
                                __tlbiel_va_range(gstart, gend, pid,
                                                PUD_SIZE, MMU_PAGE_1G);
                        asm volatile("ptesync": : :"memory");
                } else {
                        __tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
                        if (hflush)
                                __tlbie_va_range(hstart, hend, pid,
                                                PMD_SIZE, MMU_PAGE_2M);
                        if (gflush)
                                __tlbie_va_range(gstart, gend, pid,
                                                PUD_SIZE, MMU_PAGE_1G);

                        asm volatile("eieio; tlbsync; ptesync": : :"memory");
                }
        }
        preempt_enable();
}
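/*
 * Rounding example (illustrative): flushing [0x110000, 0x4f0000) with
 * 2MB PMDs gives hstart = 0x200000 and hend = 0x400000, so the 2M
 * invalidations cover only the PMD-aligned middle of the range while
 * the base page-size invalidations cover all of it.
 */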
void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end)
{
#ifdef CONFIG_HUGETLB_PAGE
        if (is_vm_hugetlb_page(vma))
                return radix__flush_hugetlb_tlb_range(vma, start, end);
#endif

        __radix__flush_tlb_range(vma->vm_mm, start, end, false);
}
EXPORT_SYMBOL(radix__flush_tlb_range);
static int radix_get_mmu_psize(int page_size)
{
        int psize;

        if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
                psize = mmu_virtual_psize;
        else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
                psize = MMU_PAGE_2M;
        else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
                psize = MMU_PAGE_1G;
        else
                return -1;
        return psize;
}
/*
 * Flush partition scoped LPID address translation for all CPUs.
 */
void radix__flush_tlb_lpid_page(unsigned int lpid,
                                        unsigned long addr,
                                        unsigned long page_size)
{
        int psize = radix_get_mmu_psize(page_size);

        _tlbie_lpid_va(addr, lpid, psize, RIC_FLUSH_TLB);
}
EXPORT_SYMBOL_GPL(radix__flush_tlb_lpid_page);
/*
 * Flush partition scoped PWC from LPID for all CPUs.
 */
void radix__flush_pwc_lpid(unsigned int lpid)
{
        _tlbie_lpid(lpid, RIC_FLUSH_PWC);
}
EXPORT_SYMBOL_GPL(radix__flush_pwc_lpid);
/*
 * Flush partition scoped translations from LPID (=LPIDR).
 */
void radix__local_flush_tlb_lpid(unsigned int lpid)
{
        _tlbiel_lpid(lpid, RIC_FLUSH_ALL);
}
EXPORT_SYMBOL_GPL(radix__local_flush_tlb_lpid);
/*
 * Flush process scoped translations from LPID (=LPIDR).
 * An important difference from the partition scoped flushes above: the
 * guest normally manages its own translations, but some cases, e.g.
 * vCPU migration, require KVM to flush on the guest's behalf.
 */
void radix__local_flush_tlb_lpid_guest(unsigned int lpid)
{
        _tlbiel_lpid_guest(lpid, RIC_FLUSH_ALL);
}
EXPORT_SYMBOL_GPL(radix__local_flush_tlb_lpid_guest);
static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
                                unsigned long end, int psize);

void radix__tlb_flush(struct mmu_gather *tlb)
{
        int psize = 0;
        struct mm_struct *mm = tlb->mm;
        int page_size = tlb->page_size;
        unsigned long start = tlb->start;
        unsigned long end = tlb->end;

        /*
         * if page size is not something we understand, do a full mm flush
         *
         * A "fullmm" flush must always do a flush_all_mm (RIC=2) flush
         * that flushes the process table entry cache upon process teardown.
         * See the comment for radix in arch_exit_mmap().
         */
        if (tlb->fullmm) {
                __flush_all_mm(mm, true);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
        } else if (mm_tlb_flush_nested(mm)) {
                /*
                 * If there is a concurrent invalidation that is clearing ptes,
                 * then it's possible this invalidation will miss one of those
                 * cleared ptes and miss flushing the TLB. If this invalidate
                 * returns before the other one flushes TLBs, that can result
                 * in it returning while there are still valid TLBs inside the
                 * range to be invalidated.
                 *
                 * See mm/memory.c:tlb_finish_mmu() for more details.
                 *
                 * The solution to this is to ensure the entire range is always
                 * flushed here. The problem for powerpc is that the flushes
                 * are page size specific, so this "forced flush" would not
                 * do the right thing if there is a mix of page sizes in
                 * the range to be invalidated. So use __radix__flush_tlb_range
                 * which invalidates all possible page sizes in the range.
                 *
                 * A PWC flush is probably not required because the core code
                 * shouldn't free page tables in this path, but accounting
                 * for the possibility makes us a bit more robust.
                 *
                 * need_flush_all is an uncommon case because page table
                 * teardown should be done with exclusive locks held (but
                 * after locks are dropped another invalidate could come
                 * in), it could be optimized further if necessary.
                 */
                if (!tlb->need_flush_all)
                        __radix__flush_tlb_range(mm, start, end, true);
                else
                        radix__flush_all_mm(mm);
#endif
        } else if ((psize = radix_get_mmu_psize(page_size)) == -1) {
                if (!tlb->need_flush_all)
                        radix__flush_tlb_mm(mm);
                else
                        radix__flush_all_mm(mm);
        } else {
                if (!tlb->need_flush_all)
                        radix__flush_tlb_range_psize(mm, start, end, psize);
                else
                        radix__flush_tlb_pwc_range_psize(mm, start, end, psize);
        }
        tlb->need_flush_all = 0;
}
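/*
 * Decision summary for radix__tlb_flush() (derived from the branches
 * above): a fullmm teardown takes the RIC=2 whole-PID path; a nested
 * concurrent invalidation forces a full-range flush at all page sizes;
 * an unrecognised page size falls back to a whole-PID flush; otherwise
 * a range flush at tlb->page_size is used, with each branch escalating
 * to include the PWC when need_flush_all is set.
 */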
static inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
                                unsigned long start, unsigned long end,
                                int psize, bool also_pwc)
{
        unsigned long pid;
        unsigned int page_shift = mmu_psize_defs[psize].shift;
        unsigned long page_size = 1UL << page_shift;
        unsigned long nr_pages = (end - start) >> page_shift;
        bool local, full;

        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                return;

        preempt_disable();
        smp_mb(); /* see radix__flush_tlb_mm */
        if (!mm_is_thread_local(mm)) {
                if (unlikely(mm_is_singlethreaded(mm))) {
                        if (end != TLB_FLUSH_ALL) {
                                exit_flush_lazy_tlbs(mm);
                                goto is_local;
                        }
                }
                local = false;
                full = (end == TLB_FLUSH_ALL ||
                                nr_pages > tlb_single_page_flush_ceiling);
        } else {
is_local:
                local = true;
                full = (end == TLB_FLUSH_ALL ||
                                nr_pages > tlb_local_single_page_flush_ceiling);
        }

        if (full) {
                if (local) {
                        _tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
                } else {
                        if (mm_needs_flush_escalation(mm))
                                also_pwc = true;

                        _tlbie_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
                }
        } else {
                if (local)
                        _tlbiel_va_range(start, end, pid, page_size, psize, also_pwc);
                else
                        _tlbie_va_range(start, end, pid, page_size, psize, also_pwc);
        }
        preempt_enable();
}
void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
                                  unsigned long end, int psize)
{
        return __radix__flush_tlb_range_psize(mm, start, end, psize, false);
}

static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
                                  unsigned long end, int psize)
{
        __radix__flush_tlb_range_psize(mm, start, end, psize, true);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
{
        unsigned long pid, end;

        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                return;

        /* 4k page size, just blow the world */
        if (PAGE_SIZE == 0x1000) {
                radix__flush_all_mm(mm);
                return;
        }

        end = addr + HPAGE_PMD_SIZE;

        /* Otherwise first do the PWC, then iterate the pages. */
        preempt_disable();
        smp_mb(); /* see radix__flush_tlb_mm */
        if (!mm_is_thread_local(mm)) {
                if (unlikely(mm_is_singlethreaded(mm))) {
                        exit_flush_lazy_tlbs(mm);
                        goto local;
                }
                _tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
        } else {
local:
                _tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
        }

        preempt_enable();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
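/*
 * Sizing note for radix__flush_tlb_collapsed_pmd() above (illustrative):
 * with 64K base pages HPAGE_PMD_SIZE is 2MB, so collapsing one PMD
 * costs a single PWC flush plus 2M / 64K = 32 page invalidations.
 */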
void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
                                unsigned long start, unsigned long end)
{
        radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
}
EXPORT_SYMBOL(radix__flush_pmd_tlb_range);
void radix__flush_tlb_all(void)
{
        unsigned long rb, prs, r, rs;
        unsigned long ric = RIC_FLUSH_ALL;

        rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
        prs = 0; /* partition scoped */
        r = 1;   /* radix format */
        rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */

        asm volatile("ptesync": : :"memory");
        /*
         * now flush guest entries by passing PRS = 1 and LPID != 0
         */
        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
        /*
         * now flush host entries by passing PRS = 0 and LPID == 0
         */
        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
void radix_kvm_prefetch_workaround(struct mm_struct *mm)
{
        unsigned long pid = mm->context.id;

        if (unlikely(pid == MMU_NO_CONTEXT))
                return;

        /*
         * If this context hasn't run on that CPU before and KVM is
         * around, there's a slim chance that the guest on another
         * CPU just brought an obsolete translation into the TLB of
         * this CPU due to a bad prefetch using the guest PID on
         * the way into the hypervisor.
         *
         * We work around this here. If KVM is possible, we check if
         * any sibling thread is in KVM. If it is, the window may exist
         * and thus we flush that PID from the core.
         *
         * A potential future improvement would be to mark which PIDs
         * have never been used on the system and avoid it if the PID
         * is new and the process has no other cpumask bit set.
         */
        if (cpu_has_feature(CPU_FTR_HVMODE) && radix_enabled()) {
                int cpu = smp_processor_id();
                int sib = cpu_first_thread_sibling(cpu);
                bool flush = false;

                for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
                        if (sib == cpu)
                                continue;
                        if (!cpu_possible(sib))
                                continue;
                        if (paca_ptrs[sib]->kvm_hstate.kvm_vcpu)
                                flush = true;
                }
                if (flush)
                        _tlbiel_pid(pid, RIC_FLUSH_ALL);
        }
}
EXPORT_SYMBOL_GPL(radix_kvm_prefetch_workaround);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */